mirror of https://github.com/oxen-io/session-ios
Merge branch 'dev' into onboarding
commit 526172243b
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,85 +0,0 @@
-// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
-
-import Foundation
-import GRDB
-import SessionUtilitiesKit
-
-public final class ConfigurationMessage: ControlMessage {
-    private enum CodingKeys: String, CodingKey {
-        case displayName
-        case profilePictureUrl
-        case profileKey
-    }
-
-    public var displayName: String?
-    public var profilePictureUrl: String?
-    public var profileKey: Data?
-
-    public override var isSelfSendValid: Bool { true }
-
-    // MARK: - Initialization
-
-    public init(
-        displayName: String?,
-        profilePictureUrl: String?,
-        profileKey: Data?
-    ) {
-        super.init()
-
-        self.displayName = displayName
-        self.profilePictureUrl = profilePictureUrl
-        self.profileKey = profileKey
-    }
-
-    // MARK: - Codable
-
-    required init(from decoder: Decoder) throws {
-        try super.init(from: decoder)
-
-        let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
-
-        displayName = try? container.decode(String.self, forKey: .displayName)
-        profilePictureUrl = try? container.decode(String.self, forKey: .profilePictureUrl)
-        profileKey = try? container.decode(Data.self, forKey: .profileKey)
-    }
-
-    public override func encode(to encoder: Encoder) throws {
-        try super.encode(to: encoder)
-
-        var container: KeyedEncodingContainer<CodingKeys> = encoder.container(keyedBy: CodingKeys.self)
-
-        try container.encodeIfPresent(displayName, forKey: .displayName)
-        try container.encodeIfPresent(profilePictureUrl, forKey: .profilePictureUrl)
-        try container.encodeIfPresent(profileKey, forKey: .profileKey)
-    }
-
-    // MARK: - Proto Conversion
-
-    public override class func fromProto(_ proto: SNProtoContent, sender: String) -> ConfigurationMessage? {
-        guard let configurationProto = proto.configurationMessage else { return nil }
-
-        let displayName = configurationProto.displayName
-        let profilePictureUrl = configurationProto.profilePicture
-        let profileKey = configurationProto.profileKey
-
-        return ConfigurationMessage(
-            displayName: displayName,
-            profilePictureUrl: profilePictureUrl,
-            profileKey: profileKey
-        )
-    }
-
-    public override func toProto(_ db: Database, threadId: String) -> SNProtoContent? { return nil }
-
-    // MARK: - Description
-
-    public var description: String {
-        """
-        LegacyConfigurationMessage(
-            displayName: \(displayName ?? "null"),
-            profilePictureUrl: \(profilePictureUrl ?? "null"),
-            profileKey: \(profileKey?.toHexString() ?? "null")
-        )
-        """
-    }
-}
@@ -0,0 +1,17 @@
+// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
+
+import Foundation
+import GRDB
+
+public final class LegacyConfigurationMessage: ControlMessage {
+    public override var isSelfSendValid: Bool { true }
+
+    public override class func fromProto(_ proto: SNProtoContent, sender: String) -> LegacyConfigurationMessage? {
+        guard proto.configurationMessage != nil else { return nil }
+
+        return LegacyConfigurationMessage()
+    }
+
+    public override func toProto(_ db: Database, threadId: String) -> SNProtoContent? { return nil }
+    public var description: String { "LegacyConfigurationMessage()" } // stringlint:disable
+}
@@ -1,68 +0,0 @@
-// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
-//
-// stringlint:disable
-
-import Foundation
-
-public enum SSKLegacy {
-    // MARK: - Collections and Keys
-
-    internal static let swarmCollectionPrefix = "LokiSwarmCollection-"
-    internal static let lastSnodePoolRefreshDateKey = "lastSnodePoolRefreshDate"
-    internal static let snodePoolCollection = "LokiSnodePoolCollection"
-    internal static let onionRequestPathCollection = "LokiOnionRequestPathCollection"
-    internal static let lastSnodePoolRefreshDateCollection = "LokiLastSnodePoolRefreshDateCollection"
-    internal static let lastMessageHashCollection = "LokiLastMessageHashCollection"
-    internal static let receivedMessagesCollection = "LokiReceivedMessagesCollection"
-
-    // MARK: - Types
-
-    public typealias LegacyOnionRequestAPIPath = [Snode]
-
-    @objc(Snode)
-    public final class Snode: NSObject, NSCoding {
-        public let address: String
-        public let port: UInt16
-        public let publicKeySet: KeySet
-
-        // MARK: - Nested Types
-
-        public struct KeySet {
-            public let ed25519Key: String
-            public let x25519Key: String
-        }
-
-        // MARK: - NSCoding
-
-        public init?(coder: NSCoder) {
-            address = coder.decodeObject(forKey: "address") as! String
-            port = coder.decodeObject(forKey: "port") as! UInt16
-
-            guard
-                let idKey = coder.decodeObject(forKey: "idKey") as? String,
-                let encryptionKey = coder.decodeObject(forKey: "encryptionKey") as? String
-            else { return nil }
-
-            publicKeySet = KeySet(ed25519Key: idKey, x25519Key: encryptionKey)
-
-            super.init()
-        }
-
-        public func encode(with coder: NSCoder) {
-            fatalError("encode(with:) should never be called for legacy types")
-        }
-
-        // Note: The 'isEqual' and 'hash' overrides are both needed to ensure the migration
-        // doesn't try to insert duplicate SNode entries into the new database (which would
-        // result in unique key constraint violations)
-        override public func isEqual(_ other: Any?) -> Bool {
-            guard let other = other as? Snode else { return false }
-
-            return address == other.address && port == other.port
-        }
-
-        override public var hash: Int {
-            return address.hashValue ^ port.hashValue
-        }
-    }
-}
@@ -1,218 +1,24 @@
 // Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
-//
-// stringlint:disable
 
 import Foundation
 import GRDB
-import YapDatabase
 import SessionUtilitiesKit
 
 enum _003_YDBToGRDBMigration: Migration {
     static let target: TargetMigrations.Identifier = .snodeKit
-    static let identifier: String = "YDBToGRDBMigration"
+    static let identifier: String = "YDBToGRDBMigration" // stringlint:disable
     static let needsConfigSync: Bool = false
-    static let fetchedTables: [(TableRecord & FetchableRecord).Type] = []
+    static let minExpectedRunDuration: TimeInterval = 0.1
+    static let fetchedTables: [(TableRecord & FetchableRecord).Type] = [Identity.self]
     static let createdOrAlteredTables: [(TableRecord & FetchableRecord).Type] = []
 
-    /// This migration can take a while if it's a very large database or there are lots of closed groups (want this to account
-    /// for about 10% of the progress bar so we intentionally have a higher `minExpectedRunDuration` so show more
-    /// progress during the migration)
-    static let minExpectedRunDuration: TimeInterval = 2.0
-
     static func migrate(_ db: Database) throws {
-        guard let dbConnection: YapDatabaseConnection = SUKLegacy.newDatabaseConnection() else {
-            SNLogNotTests("[Migration Warning] No legacy database, skipping \(target.key(with: self))")
-            return
-        }
-
-        // MARK: - Read from Legacy Database
-
-        // Note: Want to exclude the Snode's we already added from the 'onionRequestPathResult'
-        var snodeResult: Set<SSKLegacy.Snode> = []
-        var snodeSetResult: [String: Set<SSKLegacy.Snode>] = [:]
-        var lastSnodePoolRefreshDate: Date? = nil
-        var lastMessageResults: [String: (hash: String, json: JSON)] = [:]
-        var receivedMessageResults: [String: Set<String>] = [:]
-
-        // Map the Legacy types for the NSKeyedUnarchiver
-        NSKeyedUnarchiver.setClass(
-            SSKLegacy.Snode.self,
-            forClassName: "SessionSnodeKit.Snode"
-        )
-
-        dbConnection.read { transaction in
-            // MARK: --lastSnodePoolRefreshDate
-
-            lastSnodePoolRefreshDate = transaction.object(
-                forKey: SSKLegacy.lastSnodePoolRefreshDateKey,
-                inCollection: SSKLegacy.lastSnodePoolRefreshDateCollection
-            ) as? Date
-
-            // MARK: --OnionRequestPaths
-
-            if
-                let path0Snode0 = transaction.object(forKey: "0-0", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode,
-                let path0Snode1 = transaction.object(forKey: "0-1", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode,
-                let path0Snode2 = transaction.object(forKey: "0-2", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode
-            {
-                snodeResult.insert(path0Snode0)
-                snodeResult.insert(path0Snode1)
-                snodeResult.insert(path0Snode2)
-                snodeSetResult["\(SnodeSet.onionRequestPathPrefix)0"] = [ path0Snode0, path0Snode1, path0Snode2 ]
-
-                if
-                    let path1Snode0 = transaction.object(forKey: "1-0", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode,
-                    let path1Snode1 = transaction.object(forKey: "1-1", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode,
-                    let path1Snode2 = transaction.object(forKey: "1-2", inCollection: SSKLegacy.onionRequestPathCollection) as? SSKLegacy.Snode
-                {
-                    snodeResult.insert(path1Snode0)
-                    snodeResult.insert(path1Snode1)
-                    snodeResult.insert(path1Snode2)
-                    snodeSetResult["\(SnodeSet.onionRequestPathPrefix)1"] = [ path1Snode0, path1Snode1, path1Snode2 ]
-                }
-            }
-            Storage.update(progress: 0.02, for: self, in: target)
-
-            // MARK: --SnodePool
-
-            transaction.enumerateKeysAndObjects(inCollection: SSKLegacy.snodePoolCollection) { _, object, _ in
-                guard let snode = object as? SSKLegacy.Snode else { return }
-                snodeResult.insert(snode)
-            }
-
-            // MARK: --Swarms
-
-            /// **Note:** There is no index on the collection column so unfortunately it takes the same amount of time to enumerate through all
-            /// collections as it does to just get the count of collections, due to this, if the database is very large, importing the collections can be
-            /// very slow (~15s with 2,000,000 rows) - we want to show some kind of progress while enumerating so the below code creates a
-            /// very rough guess of the number of collections based on the file size of the database (this shouldn't affect most users at all)
-            let roughMbPerCollection: CGFloat = 2.5
-            let oldDatabaseSizeBytes: CGFloat = (try? FileManager.default
-                .attributesOfItem(atPath: SUKLegacy.legacyDatabaseFilepath)[.size]
-                .asType(CGFloat.self))
-                .defaulting(to: 0)
-            let roughNumCollections: CGFloat = (((oldDatabaseSizeBytes / 1024) / 1024) / roughMbPerCollection)
-            let startProgress: CGFloat = 0.02
-            let swarmCompleteProgress: CGFloat = 0.90
-            var swarmCollections: Set<String> = []
-            var collectionIndex: CGFloat = 0
-
-            transaction.enumerateCollections { collectionName, _ in
-                if collectionName.starts(with: SSKLegacy.swarmCollectionPrefix) {
-                    swarmCollections.insert(collectionName.substring(from: SSKLegacy.swarmCollectionPrefix.count))
-                }
-
-                collectionIndex += 1
-
-                Storage.update(
-                    progress: min(
-                        swarmCompleteProgress,
-                        ((collectionIndex / roughNumCollections) * (swarmCompleteProgress - startProgress))
-                    ),
-                    for: self,
-                    in: target
-                )
-            }
-            Storage.update(progress: swarmCompleteProgress, for: self, in: target)
-
-            for swarmCollection in swarmCollections {
-                let collection: String = "\(SSKLegacy.swarmCollectionPrefix)\(swarmCollection)"
-
-                transaction.enumerateKeysAndObjects(inCollection: collection) { _, object, _ in
-                    guard let snode = object as? SSKLegacy.Snode else { return }
-                    snodeResult.insert(snode)
-                    snodeSetResult[swarmCollection] = (snodeSetResult[swarmCollection] ?? Set()).inserting(snode)
-                }
-            }
-            Storage.update(progress: 0.92, for: self, in: target)
-
-            // MARK: --Received message hashes
-
-            transaction.enumerateKeysAndObjects(inCollection: SSKLegacy.receivedMessagesCollection) { key, object, _ in
-                guard let hashSet = object as? Set<String> else { return }
-                receivedMessageResults[key] = hashSet
-            }
-            Storage.update(progress: 0.93, for: self, in: target)
-
-            // MARK: --Last message info
-
-            transaction.enumerateKeysAndObjects(inCollection: SSKLegacy.lastMessageHashCollection) { key, object, _ in
-                guard let lastMessageJson = object as? JSON else { return }
-                guard let lastMessageHash: String = lastMessageJson["hash"] as? String else { return }
-
-                // Note: We remove the value from 'receivedMessageResults' as we want to try and use
-                // it's actual 'expirationDate' value
-                lastMessageResults[key] = (lastMessageHash, lastMessageJson)
-                receivedMessageResults[key] = receivedMessageResults[key]?.removing(lastMessageHash)
-            }
-            Storage.update(progress: 0.94, for: self, in: target)
-        }
-
-        // MARK: - Insert into GRDB
-
-        try autoreleasepool {
-            // MARK: --lastSnodePoolRefreshDate
-
-            db[.lastSnodePoolRefreshDate] = lastSnodePoolRefreshDate
-
-            // MARK: --SnodePool
-
-            try snodeResult.forEach { legacySnode in
-                try Snode(
-                    address: legacySnode.address,
-                    port: legacySnode.port,
-                    ed25519PublicKey: legacySnode.publicKeySet.ed25519Key,
-                    x25519PublicKey: legacySnode.publicKeySet.x25519Key
-                ).migrationSafeInsert(db)
-            }
-            Storage.update(progress: 0.96, for: self, in: target)
-
-            // MARK: --SnodeSets
-
-            try snodeSetResult.forEach { key, legacySnodeSet in
-                try legacySnodeSet.enumerated().forEach { nodeIndex, legacySnode in
-                    // Note: In this case the 'nodeIndex' is irrelivant
-                    try SnodeSet(
-                        key: key,
-                        nodeIndex: nodeIndex,
-                        address: legacySnode.address,
-                        port: legacySnode.port
-                    ).migrationSafeInsert(db)
-                }
-            }
-            Storage.update(progress: 0.98, for: self, in: target)
-        }
-
-        try autoreleasepool {
-            // MARK: --Received Messages
-
-            try receivedMessageResults.forEach { key, hashes in
-                try hashes.forEach { hash in
-                    _ = try SnodeReceivedMessageInfo(
-                        key: key,
-                        hash: hash,
-                        expirationDateMs: SnodeReceivedMessage.defaultExpirationSeconds
-                    ).migrationSafeInserted(db)
-                }
-            }
-            Storage.update(progress: 0.99, for: self, in: target)
-
-            // MARK: --Last Message Hash
-
-            try lastMessageResults.forEach { key, data in
-                let expirationDateMs: Int64 = ((data.json["expirationDate"] as? Int64) ?? 0)
-
-                _ = try SnodeReceivedMessageInfo(
-                    key: key,
-                    hash: data.hash,
-                    expirationDateMs: (expirationDateMs > 0 ?
-                        expirationDateMs :
-                        SnodeReceivedMessage.defaultExpirationSeconds
-                    )
-                ).migrationSafeInserted(db)
-            }
-        }
-
-        Storage.update(progress: 1, for: self, in: target) // In case this is the last migration
+        guard
+            !SNUtilitiesKit.isRunningTests &&
+            Identity.userExists(db)
+        else { return Storage.update(progress: 1, for: self, in: target) }
+
+        SNLogNotTests("[Migration Error] Attempted to perform legacy migation")
+        throw StorageError.migrationNoLongerSupported
     }
 }
@@ -1,169 +0,0 @@
-// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
-//
-// stringlint:disable
-
-import Foundation
-import YapDatabase
-
-public enum SUKLegacy {
-    // MARK: - YapDatabase
-
-    private static let keychainService = "TSKeyChainService"
-    private static let keychainDBCipherKeySpec = "OWSDatabaseCipherKeySpec"
-    private static let sqlCipherKeySpecLength = 48
-
-    private static var database: Atomic<YapDatabase>?
-
-    // MARK: - Collections and Keys
-
-    internal static let userAccountRegisteredNumberKey = "TSStorageRegisteredNumberKey"
-    internal static let userAccountCollection = "TSStorageUserAccountCollection"
-
-    internal static let identityKeyStoreSeedKey = "LKLokiSeed"
-    internal static let identityKeyStoreEd25519SecretKey = "LKED25519SecretKey"
-    internal static let identityKeyStoreEd25519PublicKey = "LKED25519PublicKey"
-    internal static let identityKeyStoreIdentityKey = "TSStorageManagerIdentityKeyStoreIdentityKey"
-    internal static let identityKeyStoreCollection = "TSStorageManagerIdentityKeyStoreCollection"
-
-    // MARK: - Database Functions
-
-    public static var legacyDatabaseFilepath: String {
-        let sharedDirUrl: URL = URL(fileURLWithPath: OWSFileSystem.appSharedDataDirectoryPath())
-
-        return sharedDirUrl
-            .appendingPathComponent("database")
-            .appendingPathComponent("Signal.sqlite")
-            .path
-    }
-
-    private static let legacyDatabaseDeserializer: YapDatabaseDeserializer = {
-        return { (collection: String, key: String, data: Data) -> Any in
-            /// **Note:** The old `init(forReadingWith:)` method has been deprecated with `init(forReadingFrom:)`
-            /// and Apple changed the default of `requiresSecureCoding` to be true, this results in some of the types from failing
-            /// to decode, as a result we need to set it to false here
-            let unarchiver: NSKeyedUnarchiver? = try? NSKeyedUnarchiver(forReadingFrom: data)
-            unarchiver?.requiresSecureCoding = false
-
-            guard !data.isEmpty, let result = unarchiver?.decodeObject(forKey: "root") else {
-                return UnknownDBObject()
-            }
-
-            return result
-        }
-    }()
-
-    public static var hasLegacyDatabaseFile: Bool {
-        return FileManager.default.fileExists(atPath: legacyDatabaseFilepath)
-    }
-
-    @discardableResult public static func loadDatabaseIfNeeded() -> Bool {
-        guard SUKLegacy.database == nil else { return true }
-
-        /// Ensure the databaseKeySpec exists
-        var maybeKeyData: Data? = try? SSKDefaultKeychainStorage.shared.data(
-            forService: keychainService,
-            key: keychainDBCipherKeySpec
-        )
-        defer { if maybeKeyData != nil { maybeKeyData!.resetBytes(in: 0..<maybeKeyData!.count) } }
-
-        guard maybeKeyData != nil, maybeKeyData?.count == sqlCipherKeySpecLength else { return false }
-
-        // Setup the database options
-        let options: YapDatabaseOptions = YapDatabaseOptions()
-        options.corruptAction = .fail
-        options.enableMultiProcessSupport = true
-        options.cipherUnencryptedHeaderLength = kSqliteHeaderLength // Needed for iOS to support SQLite writes
-        options.legacyCipherCompatibilityVersion = 3 // Old DB was SQLCipher V3
-        options.cipherKeySpecBlock = {
-            /// To avoid holding the keySpec in memory too long we load it as needed, since we have already confirmed
-            /// it's existence we can force-try here (the database will crash if it's invalid anyway)
-            var keySpec: Data = try! SSKDefaultKeychainStorage.shared.data(
-                forService: keychainService,
-                key: keychainDBCipherKeySpec
-            )
-            defer { keySpec.resetBytes(in: 0..<keySpec.count) }
-
-            return keySpec
-        }
-
-        let maybeDatabase: YapDatabase? = YapDatabase(
-            path: legacyDatabaseFilepath,
-            serializer: nil,
-            deserializer: legacyDatabaseDeserializer,
-            options: options
-        )
-
-        guard let database: YapDatabase = maybeDatabase else { return false }
-
-        // Store the database instance atomically
-        SUKLegacy.database = Atomic(database)
-
-        return true
-    }
-
-    public static func newDatabaseConnection() -> YapDatabaseConnection? {
-        SUKLegacy.loadDatabaseIfNeeded()
-
-        return self.database?.wrappedValue.newConnection()
-    }
-
-    public static func clearLegacyDatabaseInstance() {
-        self.database = nil
-    }
-
-    public static func deleteLegacyDatabaseFilesAndKey() throws {
-        OWSFileSystem.deleteFile(legacyDatabaseFilepath)
-        OWSFileSystem.deleteFile("\(legacyDatabaseFilepath)-shm")
-        OWSFileSystem.deleteFile("\(legacyDatabaseFilepath)-wal")
-        try SSKDefaultKeychainStorage.shared.remove(service: keychainService, key: keychainDBCipherKeySpec)
-    }
-
-    // MARK: - UnknownDBObject
-
-    @objc(LegacyUnknownDBObject)
-    public class UnknownDBObject: NSObject, NSCoding {
-        override public init() {}
-        public required init?(coder: NSCoder) {}
-        public func encode(with coder: NSCoder) { fatalError("Shouldn't be encoding this type") }
-    }
-
-    // MARK: - LagacyKeyPair
-
-    @objc(LegacyKeyPair)
-    public class KeyPair: NSObject, NSCoding {
-        private static let keyLength: Int = 32
-        private static let publicKeyKey: String = "TSECKeyPairPublicKey"
-        private static let privateKeyKey: String = "TSECKeyPairPrivateKey"
-
-        public let publicKey: Data
-        public let privateKey: Data
-
-        public init(
-            publicKeyData: Data,
-            privateKeyData: Data
-        ) {
-            publicKey = publicKeyData
-            privateKey = privateKeyData
-        }
-
-        public required init?(coder: NSCoder) {
-            var pubKeyLength: Int = 0
-            var privKeyLength: Int = 0
-
-            guard
-                let pubKeyBytes: UnsafePointer<UInt8> = coder.decodeBytes(forKey: KeyPair.publicKeyKey, returnedLength: &pubKeyLength),
-                let privateKeyBytes: UnsafePointer<UInt8> = coder.decodeBytes(forKey: KeyPair.privateKeyKey, returnedLength: &privKeyLength),
-                pubKeyLength == KeyPair.keyLength,
-                privKeyLength == KeyPair.keyLength
-            else {
-                // Fail if the keys aren't the correct length
-                return nil
-            }
-
-            publicKey = Data(bytes: pubKeyBytes, count: pubKeyLength)
-            privateKey = Data(bytes: privateKeyBytes, count: privKeyLength)
-        }
-
-        public func encode(with coder: NSCoder) { fatalError("Shouldn't be encoding this type") }
-    }
-}