Started working on refactoring the migrations to avoid an anti-pattern

pull/894/head
Morgan Pretty 2 months ago
parent 1f3f7ba7c6
commit 647919efde

@ -27,219 +27,219 @@ enum _001_InitialSetupMigration: Migration {
}()
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.create(table: Contact.self) { t in
t.column(.id, .text)
try db.create(table: "contact") { t in
t.column("id", .text)
.notNull()
.primaryKey()
t.column(.isTrusted, .boolean)
t.column("isTrusted", .boolean)
.notNull()
.defaults(to: false)
t.column(.isApproved, .boolean)
t.column("isApproved", .boolean)
.notNull()
.defaults(to: false)
t.column(.isBlocked, .boolean)
t.column("isBlocked", .boolean)
.notNull()
.defaults(to: false)
t.column(.didApproveMe, .boolean)
t.column("didApproveMe", .boolean)
.notNull()
.defaults(to: false)
t.column(.hasBeenBlocked, .boolean)
t.column("hasBeenBlocked", .boolean)
.notNull()
.defaults(to: false)
}
try db.create(table: Profile.self) { t in
t.column(.id, .text)
try db.create(table: "profile") { t in
t.column("id", .text)
.notNull()
.primaryKey()
t.column(.name, .text).notNull()
t.column(.nickname, .text)
t.column(.profilePictureUrl, .text)
t.column(.profilePictureFileName, .text)
t.column(.profileEncryptionKey, .blob)
t.column("name", .text).notNull()
t.column("nickname", .text)
t.column("profilePictureUrl", .text)
t.column("profilePictureFileName", .text)
t.column("profileEncryptionKey", .blob)
}
/// Create a full-text search table synchronized with the Profile table
try db.create(virtualTable: Profile.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: Profile.databaseTableName)
try db.create(virtualTable: "profile_fts", using: FTS5()) { t in
t.synchronize(withTable: "profile")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(Profile.Columns.nickname.name)
t.column(Profile.Columns.name.name)
t.column("nickname")
t.column("name")
}
try db.create(table: SessionThread.self) { t in
t.column(.id, .text)
try db.create(table: "thread") { t in
t.column("id", .text)
.notNull()
.primaryKey()
t.column(.variant, .integer).notNull()
t.column(.creationDateTimestamp, .double).notNull()
t.column(.shouldBeVisible, .boolean).notNull()
t.deprecatedColumn(name: "isPinned", .boolean).notNull()
t.column(.messageDraft, .text)
t.column(.notificationSound, .integer)
t.column(.mutedUntilTimestamp, .double)
t.column(.onlyNotifyForMentions, .boolean)
t.column("variant", .integer).notNull()
t.column("creationDateTimestamp", .double).notNull()
t.column("shouldBeVisible", .boolean).notNull()
t.column("isPinned", .boolean).notNull()
t.column("messageDraft", .text)
t.column("notificationSound", .integer)
t.column("mutedUntilTimestamp", .double)
t.column("onlyNotifyForMentions", .boolean)
.notNull()
.defaults(to: false)
}
try db.create(table: DisappearingMessagesConfiguration.self) { t in
t.column(.threadId, .text)
try db.create(table: "disappearingMessagesConfiguration") { t in
t.column("threadId", .text)
.notNull()
.primaryKey()
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.isEnabled, .boolean)
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("isEnabled", .boolean)
.defaults(to: false)
.notNull()
t.column(.durationSeconds, .double)
t.column("durationSeconds", .double)
.defaults(to: 0)
.notNull()
}
try db.create(table: ClosedGroup.self) { t in
t.column(.threadId, .text)
try db.create(table: "closedGroup") { t in
t.column("threadId", .text)
.notNull()
.primaryKey()
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.name, .text).notNull()
t.column(.formationTimestamp, .double).notNull()
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("name", .text).notNull()
t.column("formationTimestamp", .double).notNull()
}
/// Create a full-text search table synchronized with the ClosedGroup table
try db.create(virtualTable: ClosedGroup.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: ClosedGroup.databaseTableName)
try db.create(virtualTable: "closedGroup_fts", using: FTS5()) { t in
t.synchronize(withTable: "closedGroup")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(ClosedGroup.Columns.name.name)
t.column("name")
}
try db.create(table: ClosedGroupKeyPair.self) { t in
t.column(.threadId, .text)
try db.create(table: "closedGroupKeyPair") { t in
t.column("threadId", .text)
.notNull()
.indexed() // Quicker querying
.references(ClosedGroup.self, onDelete: .cascade) // Delete if ClosedGroup deleted
t.column(.publicKey, .blob).notNull()
t.column(.secretKey, .blob).notNull()
t.column(.receivedTimestamp, .double)
.references("closedGroup", onDelete: .cascade) // Delete if ClosedGroup deleted
t.column("publicKey", .blob).notNull()
t.column("secretKey", .blob).notNull()
t.column("receivedTimestamp", .double)
.notNull()
.indexed() // Quicker querying
t.uniqueKey([.publicKey, .secretKey, .receivedTimestamp])
t.uniqueKey(["publicKey", "secretKey", "receivedTimestamp"])
}
try db.create(table: OpenGroup.self) { t in
try db.create(table: "openGroup") { t in
// Note: There is no foreign key constraint here because we need an OpenGroup entry to
// exist to be able to retrieve the default open group rooms - as a result we need to
// manually handle deletion of this object (in both OpenGroupManager and GarbageCollectionJob)
t.column(.threadId, .text)
t.column("threadId", .text)
.notNull()
.primaryKey()
t.column(.server, .text)
t.column("server", .text)
.indexed() // Quicker querying
.notNull()
t.column(.roomToken, .text).notNull()
t.column(.publicKey, .text).notNull()
t.column(.isActive, .boolean)
t.column("roomToken", .text).notNull()
t.column("publicKey", .text).notNull()
t.column("isActive", .boolean)
.notNull()
.defaults(to: false)
t.column(.name, .text).notNull()
t.column(.roomDescription, .text)
t.column(.imageId, .text)
t.deprecatedColumn(name: "imageData", .blob)
t.column(.userCount, .integer).notNull()
t.column(.infoUpdates, .integer).notNull()
t.column(.sequenceNumber, .integer).notNull()
t.column(.inboxLatestMessageId, .integer).notNull()
t.column(.outboxLatestMessageId, .integer).notNull()
t.column(.pollFailureCount, .integer)
t.column("name", .text).notNull()
t.column("description", .text)
t.column("imageId", .text)
t.column("imageData", .blob)
t.column("userCount", .integer).notNull()
t.column("infoUpdates", .integer).notNull()
t.column("sequenceNumber", .integer).notNull()
t.column("inboxLatestMessageId", .integer).notNull()
t.column("outboxLatestMessageId", .integer).notNull()
t.column("pollFailureCount", .integer)
.notNull()
.defaults(to: 0)
}
/// Create a full-text search table synchronized with the OpenGroup table
try db.create(virtualTable: OpenGroup.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: OpenGroup.databaseTableName)
try db.create(virtualTable: "openGroup_fts", using: FTS5()) { t in
t.synchronize(withTable: "openGroup")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(OpenGroup.Columns.name.name)
t.column("name")
}
try db.create(table: Capability.self) { t in
t.column(.openGroupServer, .text)
try db.create(table: "capability") { t in
t.column("openGroupServer", .text)
.notNull()
.indexed() // Quicker querying
t.column(.variant, .text).notNull()
t.column(.isMissing, .boolean).notNull()
t.column("variant", .text).notNull()
t.column("isMissing", .boolean).notNull()
t.primaryKey([.openGroupServer, .variant])
t.primaryKey(["openGroupServer", "variant"])
}
try db.create(table: BlindedIdLookup.self) { t in
t.column(.blindedId, .text)
try db.create(table: "blindedIdLookup") { t in
t.column("blindedId", .text)
.primaryKey()
t.column(.sessionId, .text)
t.column("sessionId", .text)
.indexed() // Quicker querying
t.column(.openGroupServer, .text)
t.column("openGroupServer", .text)
.notNull()
.indexed() // Quicker querying
t.column(.openGroupPublicKey, .text)
t.column("openGroupPublicKey", .text)
.notNull()
}
try db.create(table: GroupMember.self) { t in
try db.create(table: "groupMember") { t in
// Note: Since we don't know whether this will be stored against a 'ClosedGroup' or
// an 'OpenGroup' we add the foreign key constraint against the thread itself (which
// shares the same 'id' as the 'groupId') so we can cascade delete automatically
t.column(.groupId, .text)
t.column("groupId", .text)
.notNull()
.indexed() // Quicker querying
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.profileId, .text)
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("profileId", .text)
.notNull()
.indexed() // Quicker querying
t.column(.role, .integer).notNull()
t.column("role", .integer).notNull()
}
try db.create(table: Interaction.self) { t in
t.column(.id, .integer)
try db.create(table: "interaction") { t in
t.column("id", .integer)
.notNull()
.primaryKey(autoincrement: true)
t.column(.serverHash, .text)
t.column(.messageUuid, .text)
t.column("serverHash", .text)
t.column("messageUuid", .text)
.indexed() // Quicker querying
t.column(.threadId, .text)
t.column("threadId", .text)
.notNull()
.indexed() // Quicker querying
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.authorId, .text)
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("authorId", .text)
.notNull()
.indexed() // Quicker querying
t.column(.variant, .integer).notNull()
t.column(.body, .text)
t.column(.timestampMs, .integer)
t.column("variant", .integer).notNull()
t.column("body", .text)
t.column("timestampMs", .integer)
.notNull()
.indexed() // Quicker querying
t.column(.receivedAtTimestampMs, .integer).notNull()
t.column(.wasRead, .boolean)
t.column("receivedAtTimestampMs", .integer).notNull()
t.column("wasRead", .boolean)
.notNull()
.indexed() // Quicker querying
.defaults(to: false)
t.column(.hasMention, .boolean)
t.column("hasMention", .boolean)
.notNull()
.indexed() // Quicker querying
.defaults(to: false)
t.column(.expiresInSeconds, .double)
t.column(.expiresStartedAtMs, .double)
t.column(.linkPreviewUrl, .text)
t.column("expiresInSeconds", .double)
t.column("expiresStartedAtMs", .double)
t.column("linkPreviewUrl", .text)
t.column(.openGroupServerMessageId, .integer)
t.column("openGroupServerMessageId", .integer)
.indexed() // Quicker querying
t.column(.openGroupWhisperMods, .boolean)
t.column("openGroupWhisperMods", .boolean)
.notNull()
.defaults(to: false)
t.column(.openGroupWhisperTo, .text)
t.column("openGroupWhisperTo", .text)
/// The below unique constraints are added to prevent messages being duplicated, we need
/// multiple constraints to handle the different situations which can result in duplicate messages,
@ -262,130 +262,130 @@ enum _001_InitialSetupMigration: Migration {
/// Threads with variants: [`openGroup`]:
/// `threadId` - Unique per thread
/// `openGroupServerMessageId` - Unique for VisibleMessage's on an OpenGroup server
t.uniqueKey([.threadId, .authorId, .timestampMs])
t.uniqueKey([.threadId, .serverHash])
t.uniqueKey([.threadId, .messageUuid])
t.uniqueKey([.threadId, .openGroupServerMessageId])
t.uniqueKey(["threadId", "authorId", "timestampMs"])
t.uniqueKey(["threadId", "serverHash"])
t.uniqueKey(["threadId", "messageUuid"])
t.uniqueKey(["threadId", "openGroupServerMessageId"])
}
/// Create a full-text search table synchronized with the Interaction table
try db.create(virtualTable: Interaction.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: Interaction.databaseTableName)
try db.create(virtualTable: "interaction_fts", using: FTS5()) { t in
t.synchronize(withTable: "interaction")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(Interaction.Columns.body.name)
t.column("body")
}
try db.create(table: LegacyRecipientState.self) { t in
t.column(.interactionId, .integer)
try db.create(table: "recipientState") { t in
t.column("interactionId", .integer)
.notNull()
.indexed() // Quicker querying
.references(Interaction.self, onDelete: .cascade) // Delete if interaction deleted
t.column(.recipientId, .text)
.references("interaction", onDelete: .cascade) // Delete if interaction deleted
t.column("recipientId", .text)
.notNull()
.indexed() // Quicker querying
t.column(.state, .integer)
t.column("state", .integer)
.notNull()
.indexed() // Quicker querying
t.column(.readTimestampMs, .double)
t.column(.mostRecentFailureText, .text)
t.column("readTimestampMs", .double)
t.column("mostRecentFailureText", .text)
// We want to ensure that a recipient can only have a single state for
// each interaction
t.primaryKey([.interactionId, .recipientId])
t.primaryKey(["interactionId", "recipientId"])
}
try db.create(table: Attachment.self) { t in
t.column(.id, .text)
try db.create(table: "attachment") { t in
t.column("id", .text)
.notNull()
.primaryKey()
t.column(.serverId, .text)
t.column(.variant, .integer).notNull()
t.column(.state, .integer)
t.column("serverId", .text)
t.column("variant", .integer).notNull()
t.column("state", .integer)
.notNull()
.indexed() // Quicker querying
t.column(.contentType, .text).notNull()
t.column(.byteCount, .integer)
t.column("contentType", .text).notNull()
t.column("byteCount", .integer)
.notNull()
.defaults(to: 0)
t.column(.creationTimestamp, .double)
t.column(.sourceFilename, .text)
t.column(.downloadUrl, .text)
t.column(.localRelativeFilePath, .text)
t.column(.width, .integer)
t.column(.height, .integer)
t.column(.duration, .double)
t.column(.isVisualMedia, .boolean)
t.column("creationTimestamp", .double)
t.column("sourceFilename", .text)
t.column("downloadUrl", .text)
t.column("localRelativeFilePath", .text)
t.column("width", .integer)
t.column("height", .integer)
t.column("duration", .double)
t.column("isVisualMedia", .boolean)
.notNull()
.defaults(to: false)
t.column(.isValid, .boolean)
t.column("isValid", .boolean)
.notNull()
.defaults(to: false)
t.column(.encryptionKey, .blob)
t.column(.digest, .blob)
t.column(.caption, .text)
t.column("encryptionKey", .blob)
t.column("digest", .blob)
t.column("caption", .text)
}
try db.create(table: InteractionAttachment.self) { t in
t.column(.albumIndex, .integer).notNull()
t.column(.interactionId, .integer)
try db.create(table: "interactionAttachment") { t in
t.column("albumIndex", .integer).notNull()
t.column("interactionId", .integer)
.notNull()
.indexed() // Quicker querying
.references(Interaction.self, onDelete: .cascade) // Delete if interaction deleted
t.column(.attachmentId, .text)
.references("interaction", onDelete: .cascade) // Delete if interaction deleted
t.column("attachmentId", .text)
.notNull()
.indexed() // Quicker querying
.references(Attachment.self, onDelete: .cascade) // Delete if attachment deleted
.references("attachment", onDelete: .cascade) // Delete if attachment deleted
}
try db.create(table: Quote.self) { t in
t.column(.interactionId, .integer)
try db.create(table: "quote") { t in
t.column("interactionId", .integer)
.notNull()
.primaryKey()
.references(Interaction.self, onDelete: .cascade) // Delete if interaction deleted
t.column(.authorId, .text)
.references("interaction", onDelete: .cascade) // Delete if interaction deleted
t.column("authorId", .text)
.notNull()
.indexed() // Quicker querying
.references(Profile.self)
t.column(.timestampMs, .double).notNull()
t.column(.body, .text)
t.column(.attachmentId, .text)
.references("profile")
t.column("timestampMs", .double).notNull()
t.column("body", .text)
t.column("attachmentId", .text)
.indexed() // Quicker querying
.references(Attachment.self, onDelete: .setNull) // Clear if attachment deleted
.references("attachment", onDelete: .setNull) // Clear if attachment deleted
}
try db.create(table: LinkPreview.self) { t in
t.column(.url, .text)
try db.create(table: "linkPreview") { t in
t.column("url", .text)
.notNull()
.indexed() // Quicker querying
t.column(.timestamp, .double)
t.column("timestamp", .double)
.notNull()
.indexed() // Quicker querying
t.column(.variant, .integer).notNull()
t.column(.title, .text)
t.column(.attachmentId, .text)
t.column("variant", .integer).notNull()
t.column("title", .text)
t.column("attachmentId", .text)
.indexed() // Quicker querying
.references(Attachment.self) // Managed via garbage collection
.references("attachment") // Managed via garbage collection
t.primaryKey([.url, .timestamp])
t.primaryKey(["url", "timestamp"])
}
try db.create(table: ControlMessageProcessRecord.self) { t in
t.column(.threadId, .text)
try db.create(table: "controlMessageProcessRecord") { t in
t.column("threadId", .text)
.notNull()
.indexed() // Quicker querying
t.column(.variant, .integer).notNull()
t.column(.timestampMs, .integer).notNull()
t.column(.serverExpirationTimestamp, .double)
t.column("variant", .integer).notNull()
t.column("timestampMs", .integer).notNull()
t.column("serverExpirationTimestamp", .double)
t.uniqueKey([.threadId, .variant, .timestampMs])
t.uniqueKey(["threadId", "variant", "timestampMs"])
}
try db.create(table: ThreadTypingIndicator.self) { t in
t.column(.threadId, .text)
try db.create(table: "threadTypingIndicator") { t in
t.column("threadId", .text)
.primaryKey()
.references(SessionThread.self, onDelete: .cascade) // Delete if thread deleted
t.column(.timestampMs, .integer).notNull()
.references("thread", onDelete: .cascade) // Delete if thread deleted
t.column("timestampMs", .integer).notNull()
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -18,39 +18,22 @@ enum _002_SetupStandardJobs: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// Start by adding the jobs that don't have collections (in the future, jobs like these
// will be added via migrations)
try autoreleasepool {
_ = try Job(
variant: .disappearingMessages,
behaviour: .recurringOnLaunch,
shouldBlock: true
).migrationSafeInserted(db)
_ = try Job(
variant: .failedMessageSends,
behaviour: .recurringOnLaunch,
shouldBlock: true
).migrationSafeInserted(db)
_ = try Job(
variant: .failedAttachmentDownloads,
behaviour: .recurringOnLaunch,
shouldBlock: true
).migrationSafeInserted(db)
_ = try Job(
variant: .updateProfilePicture,
behaviour: .recurringOnActive
).migrationSafeInserted(db)
_ = try Job(
variant: .retrieveDefaultOpenGroupRooms,
behaviour: .recurringOnActive
).migrationSafeInserted(db)
_ = try Job(
variant: .garbageCollection,
behaviour: .recurringOnActive
).migrationSafeInserted(db)
let jobInfo: [(variant: Job.Variant, behaviour: Job.Behaviour, shouldBlock: Bool)] = [
(.disappearingMessages, .recurringOnLaunch, true),
(.failedMessageSends, .recurringOnLaunch, true),
(.failedAttachmentDownloads, .recurringOnLaunch, true),
(.updateProfilePicture, .recurringOnActive, false),
(.retrieveDefaultOpenGroupRooms, .recurringOnActive, false),
(.garbageCollection, .recurringOnActive, false)
]
try jobInfo.forEach { variant, behaviour, shouldBlock in
try db.execute(
sql: """
INSERT INTO job VALUES (?, ?, ?)
""",
arguments: [variant.rawValue, behaviour.rawValue, shouldBlock]
)
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -14,8 +14,15 @@ enum _003_YDBToGRDBMigration: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
guard
!SNUtilitiesKit.isRunningTests &&
Identity.userExists(db, using: dependencies)
!SNUtilitiesKit.isRunningTests,
let numEdSecretKeys: Int = try? Int.fetchOne(
db,
sql: "SELECT COUNT(*) FROM identity WHERE variant == ?",
arguments: [
Identity.Variant.ed25519SecretKey.rawValue
]
),
numEdSecretKeys > 0
else { return Storage.update(progress: 1, for: self, in: target, using: dependencies) }
Log.error(.migration, "Attempted to perform legacy migration")

@ -14,13 +14,17 @@ enum _005_FixDeletedMessageReadState: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
_ = try Interaction
.filter(
Interaction.Columns.variant == Interaction.Variant.standardIncomingDeleted ||
Interaction.Columns.variant == Interaction.Variant.standardOutgoing ||
Interaction.Columns.variant == Interaction.Variant.infoDisappearingMessagesUpdate
)
.updateAll(db, Interaction.Columns.wasRead.set(to: true))
try db.execute(
sql: """
UPDATE interaction
SET wasRead = true
WHERE variant IN (?, ?, ?)
""",
arguments: [
Interaction.Variant.standardIncomingDeleted.rawValue,
Interaction.Variant.standardOutgoing.rawValue,
Interaction.Variant.infoDisappearingMessagesUpdate.rawValue
])
Storage.update(progress: 1, for: self, in: target, using: dependencies)
}

@ -15,8 +15,8 @@ enum _006_FixHiddenModAdminSupport: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.alter(table: GroupMember.self) { t in
t.add(.isHidden, .boolean)
try db.alter(table: "groupMember") { t in
t.add(column: "isHidden", .boolean)
.notNull()
.defaults(to: false)
}
@ -24,8 +24,10 @@ enum _006_FixHiddenModAdminSupport: Migration {
// When modifying OpenGroup behaviours we should always look to reset the `infoUpdates`
// value for all OpenGroups to ensure they all have the correct state for newly
// added/changed fields
_ = try OpenGroup
.updateAll(db, OpenGroup.Columns.infoUpdates.set(to: 0))
try db.execute(sql: """
UPDATE openGroup
SET infoUpdates = 0
""")
Storage.update(progress: 1, for: self, in: target, using: dependencies)
}

@ -18,21 +18,21 @@ enum _007_HomeQueryOptimisationIndexes: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.create(
index: "interaction_on_wasRead_and_hasMention_and_threadId",
on: Interaction.databaseTableName,
on: "interaction",
columns: [
Interaction.Columns.wasRead.name,
Interaction.Columns.hasMention.name,
Interaction.Columns.threadId.name
"wasRead",
"hasMention",
"threadId"
]
)
try db.create(
index: "interaction_on_threadId_and_timestampMs_and_variant",
on: Interaction.databaseTableName,
on: "interaction",
columns: [
Interaction.Columns.threadId.name,
Interaction.Columns.timestampMs.name,
Interaction.Columns.variant.name
"threadId",
"timestampMs",
"variant"
]
)

@ -14,29 +14,29 @@ enum _008_EmojiReacts: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.create(table: Reaction.self) { t in
t.column(.interactionId, .numeric)
try db.create(table: "reaction") { t in
t.column("interactionId", .numeric)
.notNull()
.indexed() // Quicker querying
.references(Interaction.self, onDelete: .cascade) // Delete if Interaction deleted
t.column(.serverHash, .text)
t.column(.timestampMs, .text)
.references("interaction", onDelete: .cascade) // Delete if Interaction deleted
t.column("serverHash", .text)
t.column("timestampMs", .text)
.notNull()
t.column(.authorId, .text)
t.column("authorId", .text)
.notNull()
.indexed() // Quicker querying
t.column(.emoji, .text)
t.column("emoji", .text)
.notNull()
.indexed() // Quicker querying
t.column(.count, .integer)
t.column("count", .integer)
.notNull()
.defaults(to: 0)
t.column(.sortId, .integer)
t.column("sortId", .integer)
.notNull()
.defaults(to: 0)
/// A specific author should only be able to have a single instance of each emoji on a particular interaction
t.uniqueKey([.interactionId, .emoji, .authorId])
t.uniqueKey(["interactionId", "emoji", "authorId"])
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -13,16 +13,18 @@ enum _009_OpenGroupPermission: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.alter(table: OpenGroup.self) { t in
t.add(.permissions, .integer)
try db.alter(table: "openGroup") { t in
t.add(column: "permissions", .integer)
.defaults(to: OpenGroup.Permissions.all)
}
// When modifying OpenGroup behaviours we should always look to reset the `infoUpdates`
// value for all OpenGroups to ensure they all have the correct state for newly
// added/changed fields
_ = try OpenGroup
.updateAll(db, OpenGroup.Columns.infoUpdates.set(to: 0))
try db.execute(sql: """
UPDATE openGroup
SET infoUpdates = 0
""")
Storage.update(progress: 1, for: self, in: target, using: dependencies)
}

@ -17,17 +17,17 @@ enum _010_AddThreadIdToFTS: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// Can't actually alter a virtual table in SQLite so we need to drop and recreate it,
// luckily this is actually pretty quick
if try db.tableExists(Interaction.fullTextSearchTableName) {
try db.drop(table: Interaction.fullTextSearchTableName)
try db.dropFTS5SynchronizationTriggers(forTable: Interaction.fullTextSearchTableName)
if try db.tableExists("interaction_fts") {
try db.drop(table: "interaction_fts")
try db.dropFTS5SynchronizationTriggers(forTable: "interaction_fts")
}
try db.create(virtualTable: Interaction.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: Interaction.databaseTableName)
try db.create(virtualTable: "interaction_fts", using: FTS5()) { t in
t.synchronize(withTable: "interaction")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(Interaction.Columns.body.name)
t.column(Interaction.Columns.threadId.name)
t.column("body")
t.column("threadId")
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -15,20 +15,20 @@ enum _011_AddPendingReadReceipts: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
try db.create(table: PendingReadReceipt.self) { t in
t.column(.threadId, .text)
try db.create(table: "pendingReadReceipt") { t in
t.column("threadId", .text)
.notNull()
.indexed() // Quicker querying
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.interactionTimestampMs, .integer)
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("interactionTimestampMs", .integer)
.notNull()
.indexed() // Quicker querying
t.column(.readTimestampMs, .integer)
t.column("readTimestampMs", .integer)
.notNull()
t.column(.serverExpirationTimestamp, .double)
t.column("serverExpirationTimestamp", .double)
.notNull()
t.primaryKey([.threadId, .interactionTimestampMs])
t.primaryKey(["threadId", "interactionTimestampMs"])
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -16,13 +16,13 @@ enum _012_AddFTSIfNeeded: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// Fix an issue where the fullTextSearchTable was unintentionally dropped, breaking global search.
// This issue only happens to internal test users.
if try db.tableExists(Interaction.fullTextSearchTableName) == false {
try db.create(virtualTable: Interaction.fullTextSearchTableName, using: FTS5()) { t in
t.synchronize(withTable: Interaction.databaseTableName)
if try db.tableExists("interaction_fts") == false {
try db.create(virtualTable: "interaction_fts", using: FTS5()) { t in
t.synchronize(withTable: "interaction")
t.tokenizer = _001_InitialSetupMigration.fullTextSearchTokenizer
t.column(Interaction.Columns.body.name)
t.column(Interaction.Columns.threadId.name)
t.column("body")
t.column("threadId")
}
}

@ -24,175 +24,162 @@ enum _013_SessionUtilChanges: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// Add `markedAsUnread` to the thread table
try db.alter(table: SessionThread.self) { t in
t.add(.markedAsUnread, .boolean)
t.add(.pinnedPriority, .integer)
try db.alter(table: "thread") { t in
t.add(column: "markedAsUnread", .boolean)
t.add(column: "pinnedPriority", .integer)
}
// Add `lastNameUpdate` and `lastProfilePictureUpdate` columns to the profile table
try db.alter(table: Profile.self) { t in
t.add(.lastNameUpdate, .integer).defaults(to: 0)
t.add(.lastProfilePictureUpdate, .integer).defaults(to: 0)
try db.alter(table: "profile".self) { t in
t.add(column: "lastNameUpdate", .integer).defaults(to: 0)
t.add(column: "lastProfilePictureUpdate", .integer).defaults(to: 0)
}
// SQLite doesn't support adding a new primary key after creation so we need to create a new table with
// the setup we want, copy data from the old table over, drop the old table and rename the new table
struct TmpGroupMember: Codable, TableRecord, FetchableRecord, PersistableRecord, ColumnExpressible {
static var databaseTableName: String { "tmpGroupMember" }
public typealias Columns = CodingKeys
public enum CodingKeys: String, CodingKey, ColumnExpression {
case groupId
case profileId
case role
case isHidden
}
public let groupId: String
public let profileId: String
public let role: GroupMember.Role
public let isHidden: Bool
}
try db.create(table: TmpGroupMember.self) { t in
try db.create(table: "tmpGroupMember") { t in
// Note: Since we don't know whether this will be stored against a 'ClosedGroup' or
// an 'OpenGroup' we add the foreign key constraint against the thread itself (which
// shares the same 'id' as the 'groupId') so we can cascade delete automatically
t.column(.groupId, .text)
t.column("groupId", .text)
.notNull()
.references(SessionThread.self, onDelete: .cascade) // Delete if Thread deleted
t.column(.profileId, .text)
.references("thread", onDelete: .cascade) // Delete if Thread deleted
t.column("profileId", .text)
.notNull()
t.column(.role, .integer).notNull()
t.column(.isHidden, .boolean)
t.column("role", .integer).notNull()
t.column("isHidden", .boolean)
.notNull()
.defaults(to: false)
t.primaryKey([.groupId, .profileId, .role])
t.primaryKey(["groupId", "profileId", "role"])
}
// Retrieve the non-duplicate group member entries from the old table
let nonDuplicateGroupMembers: [TmpGroupMember] = try GroupMember
.select(.groupId, .profileId, .role, .isHidden)
.group(GroupMember.Columns.groupId, GroupMember.Columns.profileId, GroupMember.Columns.role)
.asRequest(of: TmpGroupMember.self)
.fetchAll(db)
try db.execute(sql: """
INSERT INTO tmpGroupMember (groupId, profileId, role, isHidden)
SELECT groupId, profileId, role, MAX(isHidden) AS isHidden
FROM groupMember
GROUP BY groupId, profileId, role
""")
// Insert into the new table, drop the old table and rename the new table to be the old one
try nonDuplicateGroupMembers.forEach { try $0.upsert(db) }
try db.drop(table: GroupMember.self)
try db.rename(table: TmpGroupMember.databaseTableName, to: GroupMember.databaseTableName)
try db.drop(table: "groupMember")
try db.rename(table: "tmpGroupMember", to: "groupMember")
// Need to create the indexes separately from creating 'TmpGroupMember' to ensure they
// Need to create the indexes separately from creating 'tmpGroupMember' to ensure they
// have the correct names
try db.createIndex(on: GroupMember.self, columns: [.groupId])
try db.createIndex(on: GroupMember.self, columns: [.profileId])
// SQLite doesn't support removing unique constraints so we need to create a new table with
// the setup we want, copy data from the old table over, drop the old table and rename the new table
/// Snapshot of the desired `closedGroupKeyPair` schema used only within this migration.
///
/// SQLite can't drop/alter a UNIQUE constraint in place, so the migration copies rows into
/// this temporary table, drops the old table and renames this one back (see the surrounding
/// `drop`/`rename` calls). A frozen local type is used instead of the live model so future
/// model changes can't silently change what this migration does.
struct TmpClosedGroupKeyPair: Codable, TableRecord, FetchableRecord, PersistableRecord, ColumnExpressible {
    // Temporary table name; renamed to "closedGroupKeyPair" at the end of the copy
    static var databaseTableName: String { "tmpClosedGroupKeyPair" }

    public typealias Columns = CodingKeys
    public enum CodingKeys: String, CodingKey, ColumnExpression {
        case threadId
        case publicKey
        case secretKey
        case receivedTimestamp
        case threadKeyPairHash
    }

    // Id of the closed-group thread this key pair belongs to
    public let threadId: String
    public let publicKey: Data
    public let secretKey: Data
    // NOTE(review): presumably seconds-since-epoch — confirm against the writer of this column
    public let receivedTimestamp: TimeInterval
    // De-duping hash for the key pair; backed by a UNIQUE constraint in the new table
    public let threadKeyPairHash: String
}
try db.create(index: "groupMember_on_groupId", on: "groupMember", columns: ["groupId"])
try db.create(index: "groupMember_on_profileId", on: "groupMember", columns: ["profileId"])
try db.alter(table: ClosedGroupKeyPair.self) { t in
t.add(.threadKeyPairHash, .text).defaults(to: "")
try db.alter(table: "closedGroupKeyPair") { t in
t.add(column: "threadKeyPairHash", .text).defaults(to: "")
}
try db.create(table: TmpClosedGroupKeyPair.self) { t in
t.column(.threadId, .text)
try db.create(table: "tmpClosedGroupKeyPair") { t in
t.column("threadId", .text)
.notNull()
.references(ClosedGroup.self, onDelete: .cascade) // Delete if ClosedGroup deleted
t.column(.publicKey, .blob).notNull()
t.column(.secretKey, .blob).notNull()
t.column(.receivedTimestamp, .double)
.references("closedGroup", onDelete: .cascade) // Delete if ClosedGroup deleted
t.column("publicKey", .blob).notNull()
t.column("secretKey", .blob).notNull()
t.column("receivedTimestamp", .double)
.notNull()
t.column(.threadKeyPairHash, .integer)
t.column("threadKeyPairHash", .integer)
.notNull()
.unique()
}
// Insert into the new table, drop the old table and rename the new table to be the old one
try ClosedGroupKeyPair
.fetchAll(db)
.map { keyPair in
ClosedGroupKeyPair(
threadId: keyPair.threadId,
publicKey: keyPair.publicKey,
secretKey: keyPair.secretKey,
receivedTimestamp: keyPair.receivedTimestamp
)
}
.map { keyPair in
TmpClosedGroupKeyPair(
threadId: keyPair.threadId,
publicKey: keyPair.publicKey,
secretKey: keyPair.secretKey,
receivedTimestamp: keyPair.receivedTimestamp,
threadKeyPairHash: keyPair.threadKeyPairHash
)
}
.forEach { try? $0.insert(db) } // Ignore duplicate values
try db.drop(table: ClosedGroupKeyPair.self)
try db.rename(table: TmpClosedGroupKeyPair.databaseTableName, to: ClosedGroupKeyPair.databaseTableName)
let existingKeyPairs: [Row] = try Row.fetchAll(db, sql: "SELECT * FROM closedGroupKeyPair")
existingKeyPairs.forEach { row in
let threadId: String = row["threadId"]
let publicKey: Data = row["publicKey"]
let secretKey: Data = row["secretKey"]
// Optional try as we want to ignore duplicate values
try? db.execute(
sql: """
INSERT INTO tmpClosedGroupKeyPair (threadId, publicKey, secretKey, receivedTimestamp, threadKeyPairHash)
VALUES (?, ?, ?, ?, ?)
FROM groupMember
GROUP BY groupId, profileId, role
""",
arguments: [
threadId,
publicKey,
secretKey,
row["receivedTimestamp"],
ClosedGroupKeyPair.generateHash(
threadId: threadId,
publicKey: publicKey,
secretKey: secretKey
)
]
)
}
try db.drop(table: "closedGroupKeyPair")
try db.rename(table: "tmpClosedGroupKeyPair", to: "closedGroupKeyPair")
// Add an index for the 'ClosedGroupKeyPair' so we can look up existing keys more easily
//
// Note: Need to create the indexes separately from creating 'TmpClosedGroupKeyPair' to ensure they
// have the correct names
try db.createIndex(on: ClosedGroupKeyPair.self, columns: [.threadId])
try db.createIndex(on: ClosedGroupKeyPair.self, columns: [.receivedTimestamp])
try db.createIndex(on: ClosedGroupKeyPair.self, columns: [.threadKeyPairHash])
try db.createIndex(
on: ClosedGroupKeyPair.self,
columns: [.threadId, .threadKeyPairHash]
try db.create(
index: "closedGroupKeyPair_on_threadId",
on: "closedGroupKeyPair",
columns: ["threadId"]
)
try db.create(
index: "closedGroupKeyPair_on_receivedTimestamp",
on: "closedGroupKeyPair",
columns: ["receivedTimestamp"]
)
try db.create(
index: "closedGroupKeyPair_on_threadKeyPairHash",
on: "closedGroupKeyPair",
columns: ["threadKeyPairHash"]
)
try db.create(
index: "closedGroupKeyPair_on_threadId_and_threadKeyPairHash",
on: "closedGroupKeyPair",
columns: ["threadId", "threadKeyPairHash"]
)
// Add an index for the 'Quote' table to speed up queries
try db.createIndex(
on: Quote.self,
columns: [.timestampMs]
try db.create(
index: "quote_on_timestampMs",
on: "quote",
columns: ["timestampMs"]
)
// New table for storing the latest config dump for each type
try db.create(table: ConfigDump.self) { t in
t.column(.variant, .text)
try db.create(table: "configDump") { t in
t.column("variant", .text)
.notNull()
t.column(.publicKey, .text)
t.column("publicKey", .text)
.notNull()
.indexed()
t.column(.data, .blob)
t.column("data", .blob)
.notNull()
t.column(.timestampMs, .integer)
t.column("timestampMs", .integer)
.notNull()
.defaults(to: 0)
t.primaryKey([.variant, .publicKey])
t.primaryKey(["variant", "publicKey"])
}
// Migrate the 'isPinned' value to 'pinnedPriority'
try SessionThread
.filter(sql: "isPinned = true")
.updateAll(
db,
SessionThread.Columns.pinnedPriority.set(to: 1)
)
try db.execute(sql: """
UPDATE openGroup
SET pinnedPriority = 1
WHERE isPinned = true
""")
// If we don't have an ed25519 key then no need to create cached dump data
let userSessionId: SessionId = dependencies[cache: .general].sessionId
let userSessionId: SessionId = SessionId(
.standard,
publicKey: Array((try? Data.fetchOne(
db,
sql: "SELECT data FROM identity WHERE variant == ?",
arguments: [Identity.Variant.x25519PublicKey.rawValue]
)).defaulting(to: Data()))
)
/// Remove any hidden threads to avoid syncing them (they are basically shadow threads created by starting a conversation
/// but not sending a message so can just be cleared out)
@ -207,34 +194,46 @@ enum _013_SessionUtilChanges: Migration {
/// - Interaction
/// - ThreadTypingIndicator
/// - PendingReadReceipt
let threadIdsToDelete: [String] = try SessionThread
.filter(
SessionThread.Columns.shouldBeVisible == false &&
SessionThread.Columns.id != userSessionId.hexString
)
.select(.id)
.asRequest(of: String.self)
.fetchAll(db)
try SessionThread
.deleteAll(db, ids: threadIdsToDelete)
try DisappearingMessagesConfiguration
.filter(threadIdsToDelete.contains(DisappearingMessagesConfiguration.Columns.threadId))
.deleteAll(db)
try ClosedGroup
.filter(threadIdsToDelete.contains(ClosedGroup.Columns.threadId))
.deleteAll(db)
try GroupMember
.filter(threadIdsToDelete.contains(GroupMember.Columns.groupId))
.deleteAll(db)
try Interaction
.filter(threadIdsToDelete.contains(Interaction.Columns.threadId))
.deleteAll(db)
try ThreadTypingIndicator
.filter(threadIdsToDelete.contains(ThreadTypingIndicator.Columns.threadId))
.deleteAll(db)
try PendingReadReceipt
.filter(threadIdsToDelete.contains(PendingReadReceipt.Columns.threadId))
.deleteAll(db)
let threadIdsToDelete: [String] = try String.fetchAll(
db,
sql: """
SELECT id
FROM thread
WHERE (
shouldBeVisible = false AND
id != ?
)
""",
arguments: [userSessionId.hexString]
)
try db.execute(sql: """
DELETE FROM thread
WHERE id IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM disappearingMessagesConfiguration
WHERE threadId IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM closedGroup
WHERE threadId IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM groupMember
WHERE groupId IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM interaction
WHERE threadId IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM threadTypingIndicator
WHERE threadId IN \(threadIdsToDelete)
""")
try db.execute(sql: """
DELETE FROM pendingReadReceipt
WHERE threadId IN \(threadIdsToDelete)
""")
/// There was previously a bug which allowed users to fully delete the 'Note to Self' conversation but we don't want that, so
/// create it again if it doesn't exist
@ -245,24 +244,24 @@ enum _013_SessionUtilChanges: Migration {
if (try SessionThread.exists(db, id: userSessionId.hexString)) == false {
try db.execute(
sql: """
INSERT INTO \(SessionThread.databaseTableName) (
\(SessionThread.Columns.id.name),
\(SessionThread.Columns.variant.name),
\(SessionThread.Columns.creationDateTimestamp.name),
\(SessionThread.Columns.shouldBeVisible.name),
"isPinned",
\(SessionThread.Columns.messageDraft.name),
\(SessionThread.Columns.notificationSound.name),
\(SessionThread.Columns.mutedUntilTimestamp.name),
\(SessionThread.Columns.onlyNotifyForMentions.name),
\(SessionThread.Columns.markedAsUnread.name),
\(SessionThread.Columns.pinnedPriority.name)
INSERT INTO thread (
id,
variant,
creationDateTimestamp,
shouldBeVisible,
isPinned,
messageDraft,
notificationSound,
mutedUntilTimestamp,
onlyNotifyForMentions,
markedAsUnread,
pinnedPriority
)
VALUES (?, ?, ?, ?, ?, NULL, NULL, NULL, ?, ?, ?)
""",
arguments: [
userSessionId.hexString,
SessionThread.Variant.contact,
SessionThread.Variant.contact.rawValue,
(dependencies[cache: .snodeAPI].currentOffsetTimestampMs() / 1000),
LibSession.shouldBeVisible(priority: LibSession.hiddenPriority),
false,

@ -20,13 +20,29 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// If we have no ed25519 key then there is no need to create cached dump data
guard Identity.fetchUserEd25519KeyPair(db) != nil else {
guard
let numEdSecretKeys: Int = try? Int.fetchOne(
db,
sql: "SELECT COUNT(*) FROM identity WHERE variant == ?",
arguments: [
Identity.Variant.ed25519SecretKey.rawValue
]
),
numEdSecretKeys > 0
else {
Storage.update(progress: 1, for: self, in: target, using: dependencies)
return
}
// Create the initial config state
let userSessionId: SessionId = dependencies[cache: .general].sessionId
let userSessionId: SessionId = SessionId(
.standard,
publicKey: Array((try? Data.fetchOne(
db,
sql: "SELECT data FROM identity WHERE variant == ?",
arguments: [Identity.Variant.x25519PublicKey.rawValue]
)).defaulting(to: Data()))
)
let timestampMs: Int64 = Int64(dependencies.dateNow.timeIntervalSince1970 * TimeInterval(1000))
let cache: LibSession.Cache = LibSession.Cache(userSessionId: userSessionId, using: dependencies)
@ -35,9 +51,9 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
// Retrieve all threads (we are going to base the config dump data on the active
// threads rather than anything else in the database)
let allThreads: [String: SessionThread] = try SessionThread
.fetchAll(db)
.reduce(into: [:]) { result, next in result[next.id] = next }
let allThreads: [String: Row] = try Row
.fetchAll(db, sql: "SELECT * FROM thread")
.reduce(into: [:]) { result, next in result[next["id"]] = next }
// MARK: - UserProfile Config Dump
@ -50,11 +66,11 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
try LibSession.updateNoteToSelf(
priority: {
guard allThreads[userSessionId.hexString]?.shouldBeVisible == true else {
guard allThreads[userSessionId.hexString]?["shouldBeVisible"] == true else {
return LibSession.hiddenPriority
}
return Int32(allThreads[userSessionId.hexString]?.pinnedPriority ?? 0)
return Int32(allThreads[userSessionId.hexString]?["pinnedPriority"] ?? 0)
}(),
in: userProfileConfig
)
@ -75,11 +91,11 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
let validContactIds: [String] = allThreads
.values
.filter { thread in
thread.variant == .contact &&
thread.id != userSessionId.hexString &&
(try? SessionId(from: thread.id))?.prefix == .standard
thread["variant"] == SessionThread.Variant.contact.rawValue &&
thread["id"] != userSessionId.hexString &&
(try? SessionId(from: thread["id"]))?.prefix == .standard
}
.map { $0.id }
.map { $0["id"] }
let contactsData: [ContactInfo] = try Contact
.filter(
Contact.Columns.isBlocked == true ||

@ -18,17 +18,38 @@ enum _015_BlockCommunityMessageRequests: Migration {
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
// Add the new 'Profile' properties
try db.alter(table: Profile.self) { t in
t.add(.blocksCommunityMessageRequests, .boolean)
t.add(.lastBlocksCommunityMessageRequests, .integer).defaults(to: 0)
try db.alter(table: "profile") { t in
t.add(column: "blocksCommunityMessageRequests", .boolean)
t.add(column: "lastBlocksCommunityMessageRequests", .integer).defaults(to: 0)
}
// If the user exists and the 'checkForCommunityMessageRequests' hasn't already been set then default it to "false"
if
Identity.userExists(db, using: dependencies),
(try Setting.exists(db, id: Setting.BoolKey.checkForCommunityMessageRequests.rawValue)) == false
let numEdSecretKeys: Int = try? Int.fetchOne(
db,
sql: "SELECT COUNT(*) FROM identity WHERE variant == ?",
arguments: [
Identity.Variant.ed25519SecretKey.rawValue
]
),
numEdSecretKeys > 0,
let numSettings: Int = try? Int.fetchOne(
db,
sql: "SELECT COUNT(*) FROM setting WHERE key == ?",
arguments: [
Setting.BoolKey.checkForCommunityMessageRequests.rawValue
]
),
numSettings == 0
{
let userSessionId: SessionId = dependencies[cache: .general].sessionId
let userSessionId: SessionId = SessionId(
.standard,
publicKey: Array((try? Data.fetchOne(
db,
sql: "SELECT data FROM identity WHERE variant == ?",
arguments: [Identity.Variant.x25519PublicKey.rawValue]
)).defaulting(to: Data()))
)
let rawBlindedMessageRequestValue: Int32 = try dependencies.mutate(cache: .libSession) { cache in
try LibSession.rawBlindedMessageRequestValue(
in: cache.config(for: .userProfile, sessionId: userSessionId)
@ -36,10 +57,28 @@ enum _015_BlockCommunityMessageRequests: Migration {
}
// Use the value in the config if we happen to have one, otherwise use the default
db[.checkForCommunityMessageRequests] = (rawBlindedMessageRequestValue < 0 ?
try db.execute(sql: """
DELETE FROM setting
WHERE key = \(Setting.BoolKey.checkForCommunityMessageRequests.rawValue)
""")
var targetValue: Bool = (rawBlindedMessageRequestValue < 0 ?
true :
(rawBlindedMessageRequestValue > 0)
)
let boolAsData: Data = withUnsafeBytes(of: &targetValue) { Data($0) }
try db.execute(
sql: """
INSERT INTO setting (key, value)
VALUES (?, ?)
SET pinnedPriority = 1
WHERE isPinned = true
""",
arguments: [
Setting.BoolKey.checkForCommunityMessageRequests.rawValue,
boolAsData
]
)
}
Storage.update(progress: 1, for: self, in: target, using: dependencies)

@ -16,64 +16,30 @@ enum _016_MakeBrokenProfileTimestampsNullable: Migration {
static let droppedTables: [(TableRecord & FetchableRecord).Type] = []
static func migrate(_ db: Database, using dependencies: Dependencies) throws {
/// SQLite doesn't support altering columns after creation so we need to create a new table with the setup we
/// want, copy data from the old table over, drop the old table and rename the new table
/// Snapshot of the desired `profile` schema used only within this migration.
///
/// SQLite doesn't support altering columns after creation, so the migration copies rows into
/// this temporary table (with the corrected timestamp columns — see the migration name:
/// `_016_MakeBrokenProfileTimestampsNullable`), drops the old table and renames this one back.
/// A frozen local type is used instead of the live `Profile` model so future model changes
/// can't silently change what this migration does.
struct TmpProfile: Codable, TableRecord, FetchableRecord, PersistableRecord, ColumnExpressible {
    // Temporary table name; renamed to "profile" at the end of the copy
    static var databaseTableName: String { "tmpProfile" }

    public typealias Columns = CodingKeys
    public enum CodingKeys: String, CodingKey, ColumnExpression {
        case id
        case name
        case lastNameUpdate
        case nickname
        case profilePictureUrl
        case profilePictureFileName
        case profileEncryptionKey
        case lastProfilePictureUpdate
        case blocksCommunityMessageRequests
        case lastBlocksCommunityMessageRequests
    }

    public let id: String
    public let name: String
    // Optional timestamps are the point of this migration (previously non-nullable/broken)
    public let lastNameUpdate: TimeInterval?
    public let nickname: String?
    public let profilePictureUrl: String?
    public let profilePictureFileName: String?
    public let profileEncryptionKey: Data?
    public let lastProfilePictureUpdate: TimeInterval?
    public let blocksCommunityMessageRequests: Bool?
    public let lastBlocksCommunityMessageRequests: TimeInterval?
}
try db.create(table: TmpProfile.self) { t in
t.column(.id, .text)
try db.create(table: "tmpProfile") { t in
t.column("id", .text)
.notNull()
.primaryKey()
t.column(.name, .text).notNull()
t.column(.nickname, .text)
t.column(.profilePictureUrl, .text)
t.column(.profilePictureFileName, .text)
t.column(.profileEncryptionKey, .blob)
t.column(.lastNameUpdate, .integer).defaults(to: 0)
t.column(.lastProfilePictureUpdate, .integer).defaults(to: 0)
t.column(.blocksCommunityMessageRequests, .boolean)
t.column(.lastBlocksCommunityMessageRequests, .integer).defaults(to: 0)
t.column("name", .text).notNull()
t.column("nickname", .text)
t.column("profilePictureUrl", .text)
t.column("profilePictureFileName", .text)
t.column("profileEncryptionKey", .blob)
t.column("lastNameUpdate", .integer).defaults(to: 0)
t.column("lastProfilePictureUpdate", .integer).defaults(to: 0)
t.column("blocksCommunityMessageRequests", .boolean)
t.column("lastBlocksCommunityMessageRequests", .integer).defaults(to: 0)
}
// Insert into the new table, drop the old table and rename the new table to be the old one
try db.execute(sql: """
INSERT INTO \(TmpProfile.databaseTableName)
SELECT \(Profile.databaseTableName).*
FROM \(Profile.databaseTableName)
INSERT INTO tmpProfile
SELECT profile.*
FROM profile
""")
try db.drop(table: Profile.self)
try db.rename(table: TmpProfile.databaseTableName, to: Profile.databaseTableName)
try db.drop(table: "profile")
try db.rename(table: "tmpProfile", to: "profile")
Storage.update(progress: 1, for: self, in: target, using: dependencies)
}

@ -49,10 +49,11 @@ public struct ClosedGroupKeyPair: Codable, Equatable, FetchableRecord, Persistab
// This value has a unique constraint and is used for key de-duping so the formula
// shouldn't be modified unless all existing keys have their values updated
self.threadKeyPairHash = Data(Insecure.MD5
.hash(data: threadId.bytes + publicKey.bytes + secretKey.bytes)
.makeIterator())
.toHexString()
self.threadKeyPairHash = ClosedGroupKeyPair.generateHash(
threadId: threadId,
publicKey: publicKey,
secretKey: secretKey
)
}
}
@ -66,3 +67,14 @@ public extension ClosedGroupKeyPair {
.fetchOne(db)
}
}
// MARK: - Convenience
public extension ClosedGroupKeyPair {
    /// Derives the de-duping hash for a key pair: MD5 over the concatenated
    /// `threadId` + `publicKey` + `secretKey` bytes, hex-encoded.
    ///
    /// This value backs a UNIQUE constraint and is used for key de-duping, so the
    /// formula must not be modified unless all existing stored hashes are regenerated.
    /// NOTE(review): MD5 appears to be used purely as a de-dupe key here, not as a
    /// security boundary — confirm no caller relies on it cryptographically.
    static func generateHash(threadId: String, publicKey: Data, secretKey: Data) -> String {
        return Data(Insecure.MD5
            .hash(data: threadId.bytes + publicKey.bytes + secretKey.bytes)
            .makeIterator())
            .toHexString()
    }
}

Loading…
Cancel
Save