// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
//
// stringlint:disable

import Foundation
import CryptoKit
import Combine
import GRDB
import SignalCoreKit

open class Storage {
    public static let queuePrefix: String = "SessionDatabase"
    private static let dbFileName: String = "Session.sqlite"
    private static let keychainService: String = "TSKeyChainService"
    private static let dbCipherKeySpecKey: String = "GRDBDatabaseCipherKeySpec"
    private static let kSQLCipherKeySpecLength: Int = 48
    private static let writeWarningThreadshold: TimeInterval = 3
    
    private static var sharedDatabaseDirectoryPath: String { "\(OWSFileSystem.appSharedDataDirectoryPath())/database" }
    private static var databasePath: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)" }
    private static var databasePathShm: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)-shm" }
    private static var databasePathWal: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)-wal" }
    
    public static var hasCreatedValidInstance: Bool { internalHasCreatedValidInstance.wrappedValue }
    public static var isDatabasePasswordAccessible: Bool {
        guard (try? getDatabaseCipherKeySpec()) != nil else { return false }
        
        return true
    }
    
    private var startupError: Error?
    private let migrationsCompleted: Atomic<Bool> = Atomic(false)
    private static let internalHasCreatedValidInstance: Atomic<Bool> = Atomic(false)
    internal let internalCurrentlyRunningMigration: Atomic<(identifier: TargetMigrations.Identifier, migration: Migration.Type)?> = Atomic(nil)
    
    public static let shared: Storage = Storage()
    public private(set) var isValid: Bool = false
    
    /// This property gets set when triggering the suspend/resume notifications for the database but `GRDB` will attempt to
    /// resume the suspension when it attempts to perform a write, so it's possible for this to return a **false-positive**;
    /// this should be taken into consideration when used
    public private(set) var isSuspendedUnsafe: Bool = false
    
    /// This property gets set the first time we successfully read from the database
    public private(set) var hasSuccessfullyRead: Bool = false
    
    /// This property gets set the first time we successfully write to the database
    public private(set) var hasSuccessfullyWritten: Bool = false
    
    public var hasCompletedMigrations: Bool { migrationsCompleted.wrappedValue }
    public var currentlyRunningMigration: (identifier: TargetMigrations.Identifier, migration: Migration.Type)? {
        internalCurrentlyRunningMigration.wrappedValue
    }
    
    public static let defaultPublisherScheduler: ValueObservationScheduler = .async(onQueue: .main)
    
    fileprivate var dbWriter: DatabaseWriter?
    internal var testDbWriter: DatabaseWriter? { dbWriter }
    private var unprocessedMigrationRequirements: Atomic<[MigrationRequirement]> = Atomic(MigrationRequirement.allCases)
    private var migrationProgressUpdater: Atomic<((String, CGFloat) -> ())>?
    private var migrationRequirementProcesser: Atomic<(Database?, MigrationRequirement) -> ()>?
    
    // MARK: - Initialization
    
    public init(customWriter: DatabaseWriter? = nil) {
        configureDatabase(customWriter: customWriter)
    }
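    
    // Usage sketch for unit tests (illustrative only; `inMemoryWriter` stands for any GRDB `DatabaseWriter`,
    // eg. an in-memory `DatabaseQueue`) - passing a custom writer makes `configureDatabase` skip the
    // KeySpec/SQLCipher setup below:
    //
    //     let testStorage: Storage = Storage(customWriter: inMemoryWriter)
    //     assert(testStorage.isValid)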
    
    private func configureDatabase(customWriter: DatabaseWriter? = nil) {
        // Create the database directory if needed and ensure its protection level is set before attempting to
        // create the database KeySpec or the database itself
        OWSFileSystem.ensureDirectoryExists(Storage.sharedDatabaseDirectoryPath)
        OWSFileSystem.protectFileOrFolder(atPath: Storage.sharedDatabaseDirectoryPath)
        
        // If a custom writer was provided then use that (for unit testing)
        guard customWriter == nil else {
            dbWriter = customWriter
            isValid = true
            Storage.internalHasCreatedValidInstance.mutate { $0 = true }
            return
        }
        
        /// Generate the database KeySpec if needed (this MUST be done before we try to access the database as a different thread
        /// might attempt to access the database before the key is successfully created)
        ///
        /// We reset the bytes immediately after generation to ensure the database key doesn't hang around in memory unintentionally
        ///
        /// **Note:** If we fail to get/generate the keySpec then don't bother continuing to setup the Database as it'll just be invalid,
        /// in this case the App/Extensions will have logic that checks the `isValid` flag of the database
        do {
            var tmpKeySpec: Data = try Storage.getOrGenerateDatabaseKeySpec()
            tmpKeySpec.resetBytes(in: 0..<tmpKeySpec.count)
        }
        catch {
            startupError = error
            return
        }
        
        // Configure the database and apply the KeySpec when each connection is prepared
        var config: Configuration = Configuration()
        config.label = Storage.queuePrefix
        config.observesSuspensionNotifications = true
        config.prepareDatabase { db in
            var keySpec: Data = try Storage.getOrGenerateDatabaseKeySpec()
            defer { keySpec.resetBytes(in: 0..<keySpec.count) } // Reset content immediately after use
            
            try db.usePassphrase(keySpec)
        }
        
        // Create the DatabasePool to allow us to connect to the database and mark the storage as valid
        do {
            dbWriter = try DatabasePool(
                path: Storage.databasePath,
                configuration: config
            )
            isValid = true
            Storage.internalHasCreatedValidInstance.mutate { $0 = true }
        }
        catch { startupError = error }
    }
    
    // MARK: - Migrations
    
    public typealias KeyedMigration = (key: String, identifier: TargetMigrations.Identifier, migration: Migration.Type)
    
    public static func appliedMigrationIdentifiers(_ db: Database) -> Set<String> {
        let migrator: DatabaseMigrator = DatabaseMigrator()
        
        return (try? migrator.appliedIdentifiers(db))
            .defaulting(to: [])
    }
    
    public static func sortedMigrationInfo(migrationTargets: [MigratableTarget.Type]) -> [KeyedMigration] {
        typealias MigrationInfo = (identifier: TargetMigrations.Identifier, migrations: TargetMigrations.MigrationSet)
        
        return migrationTargets
            .map { target -> TargetMigrations in target.migrations() }
            .sorted()
            .reduce(into: [[MigrationInfo]]()) { result, next in
                next.migrations.enumerated().forEach { index, migrationSet in
                    if result.count <= index {
                        result.append([])
                    }
                    
                    result[index] = (result[index] + [(next.identifier, migrationSet)])
                }
            }
            .reduce(into: []) { result, next in
                next.forEach { identifier, migrations in
                    result.append(contentsOf: migrations.map { (identifier.key(with: $0), identifier, $0) })
                }
            }
    }
    
    public func perform(
        migrationTargets: [MigratableTarget.Type],
        async: Bool = true,
        onProgressUpdate: ((CGFloat, TimeInterval) -> ())?,
        onMigrationRequirement: @escaping (Database?, MigrationRequirement) -> (),
        onComplete: @escaping (Swift.Result<Void, Error>, Bool) -> ()
    ) {
        perform(
            sortedMigrations: Storage.sortedMigrationInfo(migrationTargets: migrationTargets),
            async: async,
            onProgressUpdate: onProgressUpdate,
            onMigrationRequirement: onMigrationRequirement,
            onComplete: onComplete
        )
    }
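    
    // Illustrative call site for the `perform` entry point above (a sketch; `YourMigrationTarget` is a
    // placeholder for whichever `MigratableTarget` types the host app registers):
    //
    //     Storage.shared.perform(
    //         migrationTargets: [YourMigrationTarget.self],
    //         onProgressUpdate: { progress, minEstimatedTotalTime in print("Migrating: \(progress)") },
    //         onMigrationRequirement: { db, requirement in /* process the requirement */ },
    //         onComplete: { result, needsConfigSync in print("Migrations completed: \(result)") }
    //     )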
    
    internal func perform(
        sortedMigrations: [KeyedMigration],
        async: Bool,
        onProgressUpdate: ((CGFloat, TimeInterval) -> ())?,
        onMigrationRequirement: @escaping (Database?, MigrationRequirement) -> (),
        onComplete: @escaping (Swift.Result<Void, Error>, Bool) -> ()
    ) {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
            let error: Error = (startupError ?? StorageError.startupFailed)
            SNLog("[Database Error] Startup failed with error: \(error)")
            onComplete(.failure(error), false)
            return
        }
        
        // Setup and run any required migrations
        var migrator: DatabaseMigrator = DatabaseMigrator()
        sortedMigrations.forEach { _, identifier, migration in
            migrator.registerMigration(self, targetIdentifier: identifier, migration: migration)
        }
        
        // Determine which migrations need to be performed and gather the relevant settings needed to
        // inform the app of progress/states
        let completedMigrations: [String] = (try? dbWriter.read { db in try migrator.completedMigrations(db) })
            .defaulting(to: [])
        let unperformedMigrations: [KeyedMigration] = sortedMigrations
            .reduce(into: []) { result, next in
                guard !completedMigrations.contains(next.key) else { return }
                
                result.append(next)
            }
        let migrationToDurationMap: [String: TimeInterval] = unperformedMigrations
            .reduce(into: [:]) { result, next in
                result[next.key] = next.migration.minExpectedRunDuration
            }
        let unperformedMigrationDurations: [TimeInterval] = unperformedMigrations
            .map { _, _, migration in migration.minExpectedRunDuration }
        let totalMinExpectedDuration: TimeInterval = migrationToDurationMap.values.reduce(0, +)
        let needsConfigSync: Bool = unperformedMigrations
            .contains(where: { _, _, migration in migration.needsConfigSync })
        
        self.migrationProgressUpdater = Atomic({ targetKey, progress in
            guard let migrationIndex: Int = unperformedMigrations.firstIndex(where: { key, _, _ in key == targetKey }) else {
                return
            }
            
            let completedExpectedDuration: TimeInterval = (
                (migrationIndex > 0 ? unperformedMigrationDurations[0..<migrationIndex].reduce(0, +) : 0) +
                (unperformedMigrationDurations[migrationIndex] * progress)
            )
            
            DispatchQueue.main.async {
                onProgressUpdate?((completedExpectedDuration / totalMinExpectedDuration), totalMinExpectedDuration)
            }
        })
        self.migrationRequirementProcesser = Atomic(onMigrationRequirement)
        
        let migrationCompleted: (Swift.Result<Void, Error>) -> () = { [weak self] result in
            // Process any unprocessed requirements which need to be processed before completion
            // then clear out the state
            let requirementProcessor: ((Database?, MigrationRequirement) -> ())? = self?.migrationRequirementProcesser?.wrappedValue
            let remainingMigrationRequirements: [MigrationRequirement] = (self?.unprocessedMigrationRequirements.wrappedValue
                .filter { $0.shouldProcessAtCompletionIfNotRequired })
                .defaulting(to: [])
            self?.migrationsCompleted.mutate { $0 = true }
            self?.migrationProgressUpdater = nil
            self?.migrationRequirementProcesser = nil
            SUKLegacy.clearLegacyDatabaseInstance()
            
            // Process any remaining migration requirements
            if !remainingMigrationRequirements.isEmpty {
                self?.write { db in
                    remainingMigrationRequirements.forEach { requirementProcessor?(db, $0) }
                }
            }
            
            // Reset in case there is a requirement on a migration which runs when returning from
            // the background
            self?.unprocessedMigrationRequirements.mutate { $0 = MigrationRequirement.allCases }
            
            // Don't log anything in the case of a 'success' or if the database is suspended (the
            // latter will happen if the user happens to return to the background too quickly on
            // launch so is unnecessarily alarming, it also gets caught and logged separately by
            // the 'write' functions anyway)
            switch result {
                case .success: break
                case .failure(DatabaseError.SQLITE_ABORT): break
                case .failure(let error): SNLog("[Migration Error] Migration failed with error: \(error)")
            }
            
            onComplete(result, needsConfigSync)
        }
        
        // If there aren't any migrations to run then just complete immediately (this way the migrator
        // doesn't try to execute on the DBWrite thread so returning from the background can't get blocked
        // due to some weird endless process running)
        guard !unperformedMigrations.isEmpty else {
            migrationCompleted(.success(()))
            return
        }
        
        // If we have an unperformed migration then trigger the progress updater immediately
        if let firstMigrationKey: String = unperformedMigrations.first?.key {
            self.migrationProgressUpdater?.wrappedValue(firstMigrationKey, 0)
        }
        
        // Note: The non-async migration should only be used for unit tests
        guard async else { return migrationCompleted(Result(try migrator.migrate(dbWriter))) }
        
        migrator.asyncMigrate(dbWriter) { result in
            let finalResult: Swift.Result<Void, Error> = {
                switch result {
                    case .failure(let error): return .failure(error)
                    case .success: return .success(())
                }
            }()
            
            // Note: We need to dispatch this to the next run loop to prevent any potential re-entrancy
            // issues since the 'asyncMigrate' returns a result containing a DB instance
            DispatchQueue.global(qos: .userInitiated).async {
                migrationCompleted(finalResult)
            }
        }
    }
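    
    // Worked example of the progress reporting above (a sketch): with two unperformed migrations whose
    // `minExpectedRunDuration` values are 2s and 3s, an update of 0.5 for the second migration reports
    // roughly (2 + 3 * 0.5) / 5 = 0.7 to `onProgressUpdate`, alongside the 5s total minimum expected duration.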
    
    public func willStartMigration(_ db: Database, _ migration: Migration.Type) {
        let unprocessedRequirements: Set<MigrationRequirement> = migration.requirements.asSet()
            .intersection(unprocessedMigrationRequirements.wrappedValue.asSet())
        
        // No need to do anything if there are no unprocessed requirements
        guard !unprocessedRequirements.isEmpty else { return }
        
        // Process all of the requirements for this migration
        unprocessedRequirements.forEach { migrationRequirementProcesser?.wrappedValue(db, $0) }
        
        // Remove any processed requirements from the list (don't want to process them multiple times)
        unprocessedMigrationRequirements.mutate {
            $0 = Array($0.asSet().subtracting(migration.requirements.asSet()))
        }
    }
    
    public static func update(
        progress: CGFloat,
        for migration: Migration.Type,
        in target: TargetMigrations.Identifier
    ) {
        // In test builds ignore any migration progress updates (we run in a custom database writer anyway)
        guard !SNUtilitiesKit.isRunningTests else { return }
        
        Storage.shared.migrationProgressUpdater?.wrappedValue(target.key(with: migration), progress)
    }
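    
    // Illustrative progress reporting from inside a migration (a sketch; `SomeMigration` and `target` are
    // placeholders for a real `Migration` conformance and its `TargetMigrations.Identifier`):
    //
    //     Storage.update(progress: 0.5, for: SomeMigration.self, in: target)   // halfway through the work
    //     Storage.update(progress: 1, for: SomeMigration.self, in: target)     // work complete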
    
    // MARK: - Security
    
    private static func getDatabaseCipherKeySpec() throws -> Data {
        return try SSKDefaultKeychainStorage.shared.data(forService: keychainService, key: dbCipherKeySpecKey)
    }
    
    @discardableResult private static func getOrGenerateDatabaseKeySpec() throws -> Data {
        do {
            var keySpec: Data = try getDatabaseCipherKeySpec()
            defer { keySpec.resetBytes(in: 0..<keySpec.count) }
            
            guard keySpec.count == kSQLCipherKeySpecLength else { throw StorageError.invalidKeySpec }
            
            return keySpec
        }
        catch {
            // No valid KeySpec was found so generate a new one and store it in the keychain (resetting
            // the bytes immediately after use so the key doesn't linger in memory)
            var keySpec: Data = Randomness.generateRandomBytes(Int32(kSQLCipherKeySpecLength))
            defer { keySpec.resetBytes(in: 0..<keySpec.count) }
            
            try SSKDefaultKeychainStorage.shared.set(data: keySpec, service: keychainService, key: dbCipherKeySpecKey)
            
            return keySpec
        }
    }
    
    // MARK: - Logging Functions
    
    fileprivate enum Action {
        case read
        case write
        case logIfSlow
    }
    
    fileprivate typealias CallInfo = (storage: Storage?, actions: [Action], file: String, function: String, line: Int)
    
    private static func perform<T>(
        info: CallInfo,
        updates: @escaping (Database) throws -> T
    ) -> (Database) throws -> T {
        return { db in
            let start: CFTimeInterval = CACurrentMediaTime()
            let actionName: String = (info.actions.contains(.write) ? "write" : "read")
            let fileName: String = (info.file.components(separatedBy: "/").last.map { " \($0):\(info.line)" } ?? "")
            let timeout: Timer? = {
                guard info.actions.contains(.logIfSlow) else { return nil }
                
                return Timer.scheduledTimerOnMainThread(withTimeInterval: Storage.writeWarningThreadshold) {
                    $0.invalidate()
                    
                    // Don't want to log on the main thread as to avoid confusion when debugging issues
                    DispatchQueue.global(qos: .default).async {
                        SNLog("[Storage\(fileName)] Slow \(actionName) taking longer than \(Storage.writeWarningThreadshold, format: ".2", omitZeroDecimal: true)s - \(info.function)")
                    }
                }
            }()
            
            // If we timed out and are logging slow actions then log the actual duration to help us
            // prioritise performance issues
            defer {
                if timeout != nil && timeout?.isValid == false {
                    let end: CFTimeInterval = CACurrentMediaTime()
                    
                    DispatchQueue.global(qos: .default).async {
                        SNLog("[Storage\(fileName)] Slow \(actionName) completed after \(end - start, format: ".2", omitZeroDecimal: true)s")
                    }
                }
                
                timeout?.invalidate()
            }
            
            // Get the result
            let result: T = try updates(db)
            
            // Update the state flags
            switch info.actions {
                case [.write], [.write, .logIfSlow]: info.storage?.hasSuccessfullyWritten = true
                case [.read], [.read, .logIfSlow]: info.storage?.hasSuccessfullyRead = true
                default: break
            }
            
            return result
        }
    }
    
    private static func logIfNeeded(_ error: Error, isWrite: Bool) {
        switch error {
            case DatabaseError.SQLITE_ABORT:
                let message: String = ((error as? DatabaseError)?.message ?? "Unknown")
                SNLog("[Storage] Database \(isWrite ? "write" : "read") failed due to error: \(message)")
                
            default: break
        }
    }
    
    private static func logIfNeeded<T>(_ error: Error, isWrite: Bool) -> T? {
        logIfNeeded(error, isWrite: isWrite)
        
        return nil
    }
    
    // MARK: - Functions
    
    @discardableResult public func write<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        updates: @escaping (Database) throws -> T?
    ) -> T? {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return nil }
        
        let info: CallInfo = { [weak self] in (self, [.write, .logIfSlow], fileName, functionName, lineNumber) }()
        
        do { return try dbWriter.write(Storage.perform(info: info, updates: updates)) }
        catch { return Storage.logIfNeeded(error, isWrite: true) }
    }
    
    open func writeAsync<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        updates: @escaping (Database) throws -> T
    ) {
        writeAsync(
            fileName: fileName,
            functionName: functionName,
            lineNumber: lineNumber,
            using: dependencies,
            updates: updates,
            completion: { _, _ in }
        )
    }
    
    open func writeAsync<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        updates: @escaping (Database) throws -> T,
        completion: @escaping (Database, Swift.Result<T, Error>) throws -> Void
    ) {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return }
        
        let info: CallInfo = { [weak self] in (self, [.write, .logIfSlow], fileName, functionName, lineNumber) }()
        
        dbWriter.asyncWrite(
            Storage.perform(info: info, updates: updates),
            completion: { db, result in
                switch result {
                    case .failure(let error): Storage.logIfNeeded(error, isWrite: true)
                    default: break
                }
                
                try? completion(db, result)
            }
        )
    }
    
    open func writePublisher<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        updates: @escaping (Database) throws -> T
    ) -> AnyPublisher<T, Error> {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
            return Fail(error: StorageError.databaseInvalid)
                .eraseToAnyPublisher()
        }
        
        let info: CallInfo = { [weak self] in (self, [.write, .logIfSlow], fileName, functionName, lineNumber) }()
        
        /// **Note:** GRDB does have a `writePublisher` method but it appears to asynchronously trigger
        /// both the `output` and `complete` closures at the same time which causes a lot of unexpected
        /// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code,
        /// for more information see https://github.com/groue/GRDB.swift/issues/1334)
        ///
        /// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduler
        /// and behaves in a much more expected way than the GRDB `writePublisher` does
        return Deferred {
            Future { resolver in
                do { resolver(Result.success(try dbWriter.write(Storage.perform(info: info, updates: updates)))) }
                catch {
                    Storage.logIfNeeded(error, isWrite: true)
                    resolver(Result.failure(error))
                }
            }
        }.eraseToAnyPublisher()
    }
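    
    // Illustrative Combine usage of `writePublisher` (a sketch; `Job` stands in for any GRDB record type
    // defined elsewhere and `cancellables` for a `Set<AnyCancellable>` owned by the caller):
    //
    //     Storage.shared
    //         .writePublisher { db in try Job.fetchCount(db) }
    //         .sink(
    //             receiveCompletion: { _ in },
    //             receiveValue: { count in print("Job count: \(count)") }
    //         )
    //         .store(in: &cancellables)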
    
    open func readPublisher<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        value: @escaping (Database) throws -> T
    ) -> AnyPublisher<T, Error> {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
            return Fail(error: StorageError.databaseInvalid)
                .eraseToAnyPublisher()
        }
        
        let info: CallInfo = { [weak self] in (self, [.read], fileName, functionName, lineNumber) }()
        
        /// **Note:** GRDB does have a `readPublisher` method but it appears to asynchronously trigger
        /// both the `output` and `complete` closures at the same time which causes a lot of unexpected
        /// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code,
        /// for more information see https://github.com/groue/GRDB.swift/issues/1334)
        ///
        /// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduler
        /// and behaves in a much more expected way than the GRDB `readPublisher` does
        return Deferred {
            Future { resolver in
                do { resolver(Result.success(try dbWriter.read(Storage.perform(info: info, updates: value)))) }
                catch {
                    Storage.logIfNeeded(error, isWrite: false)
                    resolver(Result.failure(error))
                }
            }
        }.eraseToAnyPublisher()
    }
    
    @discardableResult public func read<T>(
        fileName: String = #file,
        functionName: String = #function,
        lineNumber: Int = #line,
        using dependencies: Dependencies = Dependencies(),
        _ value: @escaping (Database) throws -> T?
    ) -> T? {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return nil }
        
        let info: CallInfo = { [weak self] in (self, [.read], fileName, functionName, lineNumber) }()
        
        do { return try dbWriter.read(Storage.perform(info: info, updates: value)) }
        catch { return Storage.logIfNeeded(error, isWrite: false) }
    }
    
    /// Refer to the `ValueObservation.start` method for full documentation
    ///
    /// - parameter observation: The observation to start
    /// - parameter scheduler: A Scheduler. By default, fresh values are
    ///   dispatched asynchronously on the main queue.
    /// - parameter onError: A closure that is provided eventual errors that
    ///   happen during observation
    /// - parameter onChange: A closure that is provided fresh values
    /// - returns: a DatabaseCancellable
    public func start<Reducer: ValueReducer>(
        _ observation: ValueObservation<Reducer>,
        scheduling scheduler: ValueObservationScheduler = .async(onQueue: .main),
        onError: @escaping (Error) -> Void,
        onChange: @escaping (Reducer.Value) -> Void
    ) -> DatabaseCancellable {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
            onError(StorageError.databaseInvalid)
            return AnyDatabaseCancellable(cancel: {})
        }
        
        return observation.start(
            in: dbWriter,
            scheduling: scheduler,
            onError: onError,
            onChange: onChange
        )
    }
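    
    // Illustrative observation (a sketch; `Profile` stands in for any GRDB record type defined elsewhere):
    //
    //     let cancellable: DatabaseCancellable = Storage.shared.start(
    //         ValueObservation.tracking { db in try Profile.fetchCount(db) },
    //         onError: { error in SNLog("Observation failed: \(error)") },
    //         onChange: { count in print("Profile count: \(count)") }
    //     )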
    
    public func addObserver(_ observer: TransactionObserver?) {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return }
        guard let observer: TransactionObserver = observer else { return }
        
        // Note: This actually triggers a write to the database so can be blocked by other
        // writes, since it's usually called on the main thread when creating a view controller
        // this can result in the UI hanging - to avoid this we dispatch (and hope there isn't
        // negative impact)
        DispatchQueue.global(qos: .default).async {
            dbWriter.add(transactionObserver: observer)
        }
    }
    
    public func removeObserver(_ observer: TransactionObserver?) {
        guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return }
        guard let observer: TransactionObserver = observer else { return }
        
        // Note: This actually triggers a write to the database so can be blocked by other
        // writes, since it's usually called on the main thread when creating a view controller
        // this can result in the UI hanging - to avoid this we dispatch (and hope there isn't
        // negative impact)
        DispatchQueue.global(qos: .default).async {
            dbWriter.remove(transactionObserver: observer)
        }
    }
}

// MARK: - Combine Extensions

public extension ValueObservation {
    func publisher(
        in storage: Storage,
        scheduling scheduler: ValueObservationScheduler = Storage.defaultPublisherScheduler
    ) -> AnyPublisher<Reducer.Value, Error> where Reducer: ValueReducer {
        guard storage.isValid, let dbWriter: DatabaseWriter = storage.dbWriter else {
            return Fail(error: StorageError.databaseInvalid).eraseToAnyPublisher()
        }
        
        return self.publisher(in: dbWriter, scheduling: scheduler)
            .eraseToAnyPublisher()
    }
}

// MARK: - Debug Convenience

#if DEBUG
public extension Storage {
    func exportInfo(password: String) throws -> (dbPath: String, keyPath: String) {
        var keySpec: Data = try Storage.getOrGenerateDatabaseKeySpec()
        defer { keySpec.resetBytes(in: 0..<keySpec.count) }