Removed some unneeded code and fixed a couple of bugs

• Removed the 'runOnceTransient' behaviour (no longer have jobs that run before the user exists)
• Removed the session id from the message snippet in the conversation list
• Fixed an issue where the SyncPushTokensJob might not run because the paths hadn't been built yet (see the sketch below)
pull/960/head
Morgan Pretty 2 months ago
parent 5ee15bbc3f
commit 99abcdebf6
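
A minimal sketch of the pattern behind the third fix, written against the updated LibSession callback API shown in this diff ('onPathsChanged' now also passes the callback id, so a one-shot listener can remove itself). The wrapper function below is illustrative only and not part of the commit:

    import Combine

    /// Illustration only: completes with the first path set reported by libSession,
    /// skipping the immediate callback when no paths have been built yet.
    func firstOnionPaths() -> AnyPublisher<[Set<LibSession.CSNode>], Error> {
        Deferred {
            Future<[Set<LibSession.CSNode>], Error> { resolver in
                _ = LibSession.onPathsChanged(skipInitialCallbackIfEmpty: true) { paths, callbackId in
                    // Only listen for the first callback, then clean up
                    LibSession.removePathsChangedCallback(callbackId: callbackId)
                    resolver(.success(paths))
                }
            }
        }.eraseToAnyPublisher()
    }

SyncPushTokensJob applies the same pattern inline (see its hunk below) and simply skips the server subscription when the resolved path set is empty.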

@ -946,7 +946,7 @@
FDF848E429405D6E007DCAE5 /* SnodeAPIEndpoint.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848DF29405D6E007DCAE5 /* SnodeAPIEndpoint.swift */; };
FDF848E529405D6E007DCAE5 /* SnodeAPIError.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848E029405D6E007DCAE5 /* SnodeAPIError.swift */; };
FDF848E629405D6E007DCAE5 /* OnionRequestAPIDestination.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848E129405D6E007DCAE5 /* OnionRequestAPIDestination.swift */; };
FDF848EB29405E4F007DCAE5 /* OnionRequestAPI.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848E829405E4E007DCAE5 /* OnionRequestAPI.swift */; };
FDF848EB29405E4F007DCAE5 /* Network+OnionRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848E829405E4E007DCAE5 /* Network+OnionRequest.swift */; };
FDF848EF294067E4007DCAE5 /* URLResponse+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848EE294067E4007DCAE5 /* URLResponse+Utilities.swift */; };
FDF848F129406A30007DCAE5 /* Format.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848F029406A30007DCAE5 /* Format.swift */; };
FDF848F329413DB0007DCAE5 /* ImagePickerHandler.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF848F229413DB0007DCAE5 /* ImagePickerHandler.swift */; };
@ -2136,7 +2136,7 @@
FDF848DF29405D6E007DCAE5 /* SnodeAPIEndpoint.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SnodeAPIEndpoint.swift; sourceTree = "<group>"; };
FDF848E029405D6E007DCAE5 /* SnodeAPIError.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SnodeAPIError.swift; sourceTree = "<group>"; };
FDF848E129405D6E007DCAE5 /* OnionRequestAPIDestination.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OnionRequestAPIDestination.swift; sourceTree = "<group>"; };
FDF848E829405E4E007DCAE5 /* OnionRequestAPI.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OnionRequestAPI.swift; sourceTree = "<group>"; };
FDF848E829405E4E007DCAE5 /* Network+OnionRequest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "Network+OnionRequest.swift"; sourceTree = "<group>"; };
FDF848EE294067E4007DCAE5 /* URLResponse+Utilities.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "URLResponse+Utilities.swift"; sourceTree = "<group>"; };
FDF848F029406A30007DCAE5 /* Format.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; name = Format.swift; path = "SessionUIKit/Style Guide/Format.swift"; sourceTree = SOURCE_ROOT; };
FDF848F229413DB0007DCAE5 /* ImagePickerHandler.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ImagePickerHandler.swift; sourceTree = "<group>"; };
@ -4543,7 +4543,7 @@
FDF8489329405C1B007DCAE5 /* SnodeAPI.swift */,
FD7F747F2BB283A9006DDFD8 /* Request+SnodeAPI.swift */,
FD7F74852BB2868E006DDFD8 /* ResponseInfo+SnodeAPI.swift */,
FDF848E829405E4E007DCAE5 /* OnionRequestAPI.swift */,
FDF848E829405E4E007DCAE5 /* Network+OnionRequest.swift */,
FD7F747B2BB28182006DDFD8 /* PreparedRequest+OnionRequest.swift */,
);
path = Networking;
@ -5956,7 +5956,7 @@
FDF848CF29405C5B007DCAE5 /* SendMessageRequest.swift in Sources */,
FD6A7A6D2818C61500035AC1 /* _002_SetupStandardJobs.swift in Sources */,
FDF848E429405D6E007DCAE5 /* SnodeAPIEndpoint.swift in Sources */,
FDF848EB29405E4F007DCAE5 /* OnionRequestAPI.swift in Sources */,
FDF848EB29405E4F007DCAE5 /* Network+OnionRequest.swift in Sources */,
FD17D7AE27F41C4300122BE0 /* SnodeReceivedMessageInfo.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;

@ -395,13 +395,15 @@ final class EditClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegat
for: .contact,
id: lhs.profileId,
name: lhs.profile?.name,
nickname: lhs.profile?.nickname
nickname: lhs.profile?.nickname,
suppressId: false
)
let rhsDisplayName: String = Profile.displayName(
for: .contact,
id: rhs.profileId,
name: rhs.profile?.name,
nickname: rhs.profile?.nickname
nickname: rhs.profile?.nickname,
suppressId: false
)
return (lhsDisplayName < rhsDisplayName)

@ -53,6 +53,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
// Create AppEnvironment
AppEnvironment.shared.setup()
LibSession.createNetworkIfNeeded()
LibSession.addNetworkLogger()
// Note: Intentionally dispatching sync as we want to wait for these to complete before
// continuing
@ -609,10 +610,6 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
// Navigate to the appropriate screen depending on the onboarding state
switch Onboarding.State.current {
case .newUser:
/// Enable single-execution jobs (this allows fetching the snode pool, building paths and fetching the swarm for
/// retrieving the profile name when restoring an account before the account is properly created)
JobRunner.enableNewSingleExecutionJobsOnly()
DispatchQueue.main.async {
let viewController: LandingVC = LandingVC()
populateHomeScreenTimer.invalidate()

@ -79,61 +79,75 @@ public class AppEnvironment {
// to a local directory (so they can be exported via Xcode) - the below code reads any
// logs from the shared directory and attempts to add them to the main app logs to make
// debugging user issues in extensions easier
DispatchQueue.global(qos: .background).async { [fileLogger] in
let extensionInfo: [(dir: String, type: ExtensionType)] = [
("\(OWSFileSystem.appSharedDataDirectoryPath())/Logs/NotificationExtension", .notification),
("\(OWSFileSystem.appSharedDataDirectoryPath())/Logs/ShareExtension", .share)
]
let extensionLogs: [(path: String, type: ExtensionType)] = extensionInfo.flatMap { dir, type -> [(path: String, type: ExtensionType)] in
guard let files: [String] = try? FileManager.default.contentsOfDirectory(atPath: dir) else { return [] }
return files.map { ("\(dir)/\($0)", type) }
DispatchQueue.global(qos: .utility).async { [fileLogger] in
guard let currentLogFileInfo: DDLogFileInfo = fileLogger.currentLogFileInfo else {
return SNLog("Unable to retrieve current log file.")
}
// Log to ensure the log file exists
OWSLogger.info("")
DDLog.flushLog()
do {
guard
let currentLogFileInfo: DDLogFileInfo = fileLogger.currentLogFileInfo,
let fileHandle: FileHandle = FileHandle(forWritingAtPath: currentLogFileInfo.filePath)
else { throw StorageError.objectNotFound }
// Ensure we close the file handle
defer { fileHandle.closeFile() }
DDLog.loggingQueue.async {
let extensionInfo: [(dir: String, type: ExtensionType)] = [
("\(OWSFileSystem.appSharedDataDirectoryPath())/Logs/NotificationExtension", .notification),
("\(OWSFileSystem.appSharedDataDirectoryPath())/Logs/ShareExtension", .share)
]
let extensionLogs: [(path: String, type: ExtensionType)] = extensionInfo.flatMap { dir, type -> [(path: String, type: ExtensionType)] in
guard let files: [String] = try? FileManager.default.contentsOfDirectory(atPath: dir) else { return [] }
return files.map { ("\(dir)/\($0)", type) }
}
// Move to the end of the file to insert the logs
if #available(iOS 13.4, *) { try fileHandle.seekToEnd() }
else { fileHandle.seekToEndOfFile() }
try extensionLogs
.grouped(by: \.type)
.forEach { type, value in
guard
let typeNameStartData: Data = "🧩 \(type.name) -- Start\n".data(using: .utf8),
let typeNameEndData: Data = "🧩 \(type.name) -- End\n".data(using: .utf8)
else { throw StorageError.invalidData }
// Write the type start separator
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: typeNameStartData) }
else { fileHandle.write(typeNameStartData) }
// Write the logs
try value.forEach { path, _ in
let logData: Data = try Data(contentsOf: URL(fileURLWithPath: path))
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: logData) }
else { fileHandle.write(logData) }
do {
guard let fileHandle: FileHandle = FileHandle(forWritingAtPath: currentLogFileInfo.filePath) else {
throw StorageError.objectNotFound
}
// Ensure we close the file handle
defer { fileHandle.closeFile() }
// Move to the end of the file to insert the logs
if #available(iOS 13.4, *) { try fileHandle.seekToEnd() }
else { fileHandle.seekToEndOfFile() }
try extensionLogs
.grouped(by: \.type)
.forEach { type, value in
guard !value.isEmpty else { return } // Ignore if there are no logs
guard
let typeNameStartData: Data = "🧩 \(type.name) -- Start\n".data(using: .utf8),
let typeNameEndData: Data = "🧩 \(type.name) -- End\n".data(using: .utf8)
else { throw StorageError.invalidData }
// Extension logs have been written to the app logs, remove them now
try? FileManager.default.removeItem(atPath: path)
var hasWrittenStartLog: Bool = false
// Write the logs
try value.forEach { path, _ in
let logData: Data = try Data(contentsOf: URL(fileURLWithPath: path))
guard !logData.isEmpty else { return } // Ignore empty files
// Write the type start separator if needed
if !hasWrittenStartLog {
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: typeNameStartData) }
else { fileHandle.write(typeNameStartData) }
hasWrittenStartLog = true
}
// Write the log data to the log file
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: logData) }
else { fileHandle.write(logData) }
// Extension logs have been written to the app logs, remove them now
try? FileManager.default.removeItem(atPath: path)
}
// Write the type end separator if needed
if hasWrittenStartLog {
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: typeNameEndData) }
else { fileHandle.write(typeNameEndData) }
}
}
// Write the type end separator
if #available(iOS 13.4, *) { try fileHandle.write(contentsOf: typeNameEndData) }
else { fileHandle.write(typeNameEndData) }
}
}
catch { SNLog("Unable to write extension logs to current log file") }
}
catch { SNLog("Unable to write extension logs to current log file") }
}
}
}

@ -107,9 +107,25 @@ public enum SyncPushTokensJob: JobExecutor {
/// https://developer.apple.com/library/archive/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/HandlingRemoteNotifications.html#//apple_ref/doc/uid/TP40008194-CH6-SW1
SNLog("[SyncPushTokensJob] Re-registering for remote notifications")
PushRegistrationManager.shared.requestPushTokens()
.flatMap { (pushToken: String, voipToken: String) -> AnyPublisher<Void, Error> in
guard !LibSession.hasPaths else {
SNLog("[SyncPushTokensJob] OS subscription completed, skipping server subscription due to lack of paths")
.flatMap { (pushToken: String, voipToken: String) -> AnyPublisher<(String, String)?, Error> in
Deferred {
Future<(String, String)?, Error> { resolver in
_ = LibSession.onPathsChanged(skipInitialCallbackIfEmpty: true) { paths, pathsChangedId in
// Only listen for the first callback
LibSession.removePathsChangedCallback(callbackId: pathsChangedId)
guard !paths.isEmpty else {
SNLog("[SyncPushTokensJob] OS subscription completed, skipping server subscription due to lack of paths")
return resolver(Result.success(nil))
}
resolver(Result.success((pushToken, voipToken)))
}
}
}.eraseToAnyPublisher()
}
.flatMap { (tokenInfo: (String, String)?) -> AnyPublisher<Void, Error> in
guard let (pushToken, voipToken): (String, String) = tokenInfo else {
return Just(())
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
@ -211,7 +227,7 @@ extension SyncPushTokensJob {
private func redact(_ string: String) -> String {
#if DEBUG
return string
return "[ DEBUG_NOT_REDACTED \(string) ]" // stringlint:disable
#else
return "[ READACTED \(string.prefix(2))...\(string.suffix(2)) ]" // stringlint:disable
#endif

@ -120,7 +120,7 @@ final class PathVC: BaseVC {
topSpacer.heightAnchor.constraint(equalTo: bottomSpacer.heightAnchor).isActive = true
// Register for status updates (will be called immediately with current paths)
pathUpdateId = LibSession.onPathsChanged { [weak self] paths in
pathUpdateId = LibSession.onPathsChanged { [weak self] paths, _ in
DispatchQueue.main.async {
self?.update(paths: paths, force: false)
}

@ -49,7 +49,7 @@ public enum IP2Country {
pathsChangedCallbackId.mutate { pathsChangedCallbackId in
guard pathsChangedCallbackId == nil else { return }
pathsChangedCallbackId = LibSession.onPathsChanged(callback: { paths in
pathsChangedCallbackId = LibSession.onPathsChanged(callback: { paths, _ in
self.populateCacheIfNeeded(paths: paths)
})
}

@ -342,7 +342,7 @@ public extension Profile {
/// The name to display in the UI for a given thread variant
func displayName(for threadVariant: SessionThread.Variant = .contact) -> String {
return Profile.displayName(for: threadVariant, id: id, name: name, nickname: nickname)
return Profile.displayName(for: threadVariant, id: id, name: name, nickname: nickname, suppressId: false)
}
static func displayName(
@ -350,6 +350,7 @@ public extension Profile {
id: String,
name: String?,
nickname: String?,
suppressId: Bool,
customFallback: String? = nil
) -> String {
if let nickname: String = nickname, !nickname.isEmpty { return nickname }
@ -358,10 +359,10 @@ public extension Profile {
return (customFallback ?? Profile.truncated(id: id, threadVariant: threadVariant))
}
switch threadVariant {
case .contact, .legacyGroup, .group: return name
switch (threadVariant, suppressId) {
case (.contact, _), (.legacyGroup, _), (.group, _), (.community, true): return name
case .community:
case (.community, false):
// In open groups, where it's more likely that multiple users have the same name,
// we display a bit of the Session ID after a user's display name for added context
return "\(name) (\(Profile.truncated(id: id, truncating: .middle)))"

@ -354,9 +354,10 @@ public extension Message {
/// closed group key update messages (the `NotificationServiceExtension` does this itself)
static func processRawReceivedMessageAsNotification(
_ db: Database,
envelope: SNProtoEnvelope,
data: Data,
using dependencies: Dependencies = Dependencies()
) throws -> ProcessedMessage? {
let envelope: SNProtoEnvelope = try MessageWrapper.unwrap(data: data)
let processedMessage: ProcessedMessage? = try processRawReceivedMessage(
db,
envelope: envelope,

@ -1,9 +1,10 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionSnodeKit
extension PushNotificationAPI {
struct NotificationMetadata: Codable {
public struct NotificationMetadata: Codable {
private enum CodingKeys: String, CodingKey {
case accountId = "@"
case hash = "#"
@ -15,39 +16,43 @@ extension PushNotificationAPI {
}
/// Account ID (such as Session ID or closed group ID) where the message arrived.
let accountId: String
public let accountId: String
/// The hash of the message in the swarm.
let hash: String
public let hash: String
/// The swarm namespace in which this message arrived.
let namespace: Int
public let namespace: SnodeAPI.Namespace
/// The swarm timestamp when the message was created (unix epoch milliseconds)
let createdTimestampMs: Int64
public let createdTimestampMs: Int64
/// The message's swarm expiry timestamp (unix epoch milliseconds)
let expirationTimestampMs: Int64
public let expirationTimestampMs: Int64
/// The length of the message data. This is always included, even if the message content
/// itself was too large to fit into the push notification.
let dataLength: Int
public let dataLength: Int
/// This will be `true` if the data was omitted because it was too long to fit in a push
/// notification (around 2.5kB of raw data), in which case the push notification includes
/// only this metadata but not the message content itself.
let dataTooLong: Bool
public let dataTooLong: Bool
}
}
extension PushNotificationAPI.NotificationMetadata {
init(from decoder: Decoder) throws {
public init(from decoder: Decoder) throws {
let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
let namespace: SnodeAPI.Namespace = SnodeAPI.Namespace(
rawValue: try container.decode(Int.self, forKey: .namespace)
).defaulting(to: .unknown)
self = PushNotificationAPI.NotificationMetadata(
accountId: try container.decode(String.self, forKey: .accountId),
hash: try container.decode(String.self, forKey: .hash),
namespace: try container.decode(Int.self, forKey: .namespace),
namespace: namespace,
createdTimestampMs: try container.decode(Int64.self, forKey: .createdTimestampMs),
expirationTimestampMs: try container.decode(Int64.self, forKey: .expirationTimestampMs),
dataLength: try container.decode(Int.self, forKey: .dataLength),
@ -55,3 +60,33 @@ extension PushNotificationAPI.NotificationMetadata {
)
}
}
// MARK: - Convenience
extension PushNotificationAPI.NotificationMetadata {
static var invalid: PushNotificationAPI.NotificationMetadata {
PushNotificationAPI.NotificationMetadata(
accountId: "",
hash: "",
namespace: .unknown,
createdTimestampMs: 0,
expirationTimestampMs: 0,
dataLength: 0,
dataTooLong: false
)
}
static func legacyGroupMessage(envelope: SNProtoEnvelope) throws -> PushNotificationAPI.NotificationMetadata {
guard let publicKey: String = envelope.source else { throw MessageReceiverError.invalidMessage }
return PushNotificationAPI.NotificationMetadata(
accountId: publicKey,
hash: "",
namespace: .legacyClosedGroup,
createdTimestampMs: 0,
expirationTimestampMs: 0,
dataLength: 0,
dataTooLong: false
)
}
}

@ -383,33 +383,36 @@ public enum PushNotificationAPI {
public static func processNotification(
notificationContent: UNNotificationContent,
dependencies: Dependencies = Dependencies()
) -> (envelope: SNProtoEnvelope?, result: ProcessResult) {
) -> (data: Data?, metadata: NotificationMetadata, result: ProcessResult) {
// Make sure the notification is from the updated push server
guard notificationContent.userInfo["spns"] != nil else {
guard
let base64EncodedData: String = notificationContent.userInfo["ENCRYPTED_DATA"] as? String,
let data: Data = Data(base64Encoded: base64EncodedData),
let envelope: SNProtoEnvelope = try? MessageWrapper.unwrap(data: data)
else { return (nil, .legacyFailure) }
let data: Data = Data(base64Encoded: base64EncodedData)
else { return (nil, .invalid, .legacyFailure) }
// We only support legacy notifications for legacy group conversations
guard envelope.type == .closedGroupMessage else { return (envelope, .legacyForceSilent) }
guard
let envelope: SNProtoEnvelope = try? MessageWrapper.unwrap(data: data),
envelope.type == .closedGroupMessage,
let metadata: NotificationMetadata = try? .legacyGroupMessage(envelope: envelope)
else { return (data, .invalid, .legacyForceSilent) }
return (envelope, .legacySuccess)
return (data, metadata, .legacySuccess)
}
guard let base64EncodedEncString: String = notificationContent.userInfo["enc_payload"] as? String else {
return (nil, .failureNoContent)
return (nil, .invalid, .failureNoContent)
}
guard
let encData: Data = Data(base64Encoded: base64EncodedEncString),
let encryptedData: Data = Data(base64Encoded: base64EncodedEncString),
let notificationsEncryptionKey: Data = try? getOrGenerateEncryptionKey(using: dependencies),
encData.count > dependencies.crypto.size(.aeadXChaCha20NonceBytes)
else { return (nil, .failure) }
encryptedData.count > dependencies.crypto.size(.aeadXChaCha20NonceBytes)
else { return (nil, .invalid, .failure) }
let nonce: Data = encData[0..<dependencies.crypto.size(.aeadXChaCha20NonceBytes)]
let payload: Data = encData[dependencies.crypto.size(.aeadXChaCha20NonceBytes)...]
let nonce: Data = encryptedData[0..<dependencies.crypto.size(.aeadXChaCha20NonceBytes)]
let payload: Data = encryptedData[dependencies.crypto.size(.aeadXChaCha20NonceBytes)...]
guard
let paddedData: [UInt8] = try? dependencies.crypto.perform(
@ -419,28 +422,27 @@ public enum PushNotificationAPI {
nonce: nonce.bytes
)
)
else { return (nil, .failure) }
else { return (nil, .invalid, .failure) }
let decryptedData: Data = Data(paddedData.reversed().drop(while: { $0 == 0 }).reversed())
// Decode the decrypted data
guard let notification: BencodeResponse<NotificationMetadata> = try? Bencode.decodeResponse(from: decryptedData) else {
return (nil, .failure)
return (nil, .invalid, .failure)
}
// If the metadata says that the message was too large then we should show the generic
// notification (this is a valid case)
guard !notification.info.dataTooLong else { return (nil, .successTooLong) }
guard !notification.info.dataTooLong else { return (nil, notification.info, .successTooLong) }
// Check that the body we were given is valid
guard
let notificationData: Data = notification.data,
notification.info.dataLength == notificationData.count,
let envelope = try? MessageWrapper.unwrap(data: notificationData)
else { return (nil, .failure) }
notification.info.dataLength == notificationData.count
else { return (nil, notification.info, .failure) }
// Success, we have the notification content
return (envelope, .success)
return (notificationData, notification.info, .success)
}
// MARK: - Security

@ -306,7 +306,8 @@ public struct MessageViewModel: FetchableRecordWithRowId, Decodable, Equatable,
for: self.threadVariant,
id: self.authorId,
name: self.authorNameInternal,
nickname: nil // Folded into 'authorName' within the Query
nickname: nil, // Folded into 'authorName' within the Query
suppressId: false // Show the id next to the author name if desired
)
let shouldShowDateBeforeThisModel: Bool = {
guard self.isTypingIndicator != true else { return false }
@ -404,7 +405,8 @@ public struct MessageViewModel: FetchableRecordWithRowId, Decodable, Equatable,
for: self.threadVariant,
id: self.threadId,
name: self.threadContactNameInternal,
nickname: nil // Folded into 'threadContactNameInternal' within the Query
nickname: nil, // Folded into 'threadContactNameInternal' within the Query
suppressId: false // Show the id next to the author name if desired
),
authorDisplayName: authorDisplayName,
attachmentDescriptionInfo: self.attachments?.first.map { firstAttachment in

@ -236,7 +236,8 @@ public struct SessionThreadViewModel: FetchableRecordWithRowId, Decodable, Equat
for: .contact,
id: threadId,
name: threadContactNameInternal,
nickname: nil, // Folded into 'threadContactNameInternal' within the Query
nickname: nil, // Folded into 'threadContactNameInternal' within the Query
suppressId: true, // Don't include the account id in the name in the conversation list
customFallback: "Anonymous"
)
}
@ -251,7 +252,8 @@ public struct SessionThreadViewModel: FetchableRecordWithRowId, Decodable, Equat
for: threadVariant,
id: (authorId ?? threadId),
name: authorNameInternal,
nickname: nil, // Folded into 'authorName' within the Query
nickname: nil, // Folded into 'authorName' within the Query
suppressId: true, // Don't include the account id in the name in the conversation list
customFallback: (threadVariant == .contact ?
"Anonymous" :
nil

@ -66,13 +66,13 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
)
}
let (maybeEnvelope, result) = PushNotificationAPI.processNotification(
let (maybeData, metadata, result) = PushNotificationAPI.processNotification(
notificationContent: notificationContent
)
guard
(result == .success || result == .legacySuccess),
let envelope: SNProtoEnvelope = maybeEnvelope
let data: Data = maybeData
else {
switch result {
// If we got an explicit failure, or we got a success but no content then show
@ -80,17 +80,10 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
case .success, .legacySuccess, .failure, .legacyFailure:
return self.handleFailure(for: notificationContent, error: .processing(result))
// Just log if the notification was too long (a ~2k message should be able to fit so
// these will most commonly be call or config messages)
case .successTooLong:
/// If the notification is too long and there is an ongoing call or a recent call pre-offer then we assume the notification
/// is a call `ICE_CANDIDATES` message and just complete silently (because the fallback would be annoying), if not
/// then we do want to show the fallback notification
guard
isCallOngoing ||
(lastCallPreOffer ?? Date.distantPast).timeIntervalSinceNow < NotificationServiceExtension.callPreOfferLargeNotificationSupressionDuration
else { return self.handleFailure(for: notificationContent, error: .processing(result)) }
SNLog("[NotificationServiceExtension] Suppressing large notification too close to a call.", forceNSLog: true)
return
return SNLog("[NotificationServiceExtension] Received too long notification for namespace: \(metadata.namespace).", forceNSLog: true)
case .legacyForceSilent, .failureNoContent: return
}
@ -101,7 +94,7 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
// is added to notification center
Storage.shared.write { db in
do {
guard let processedMessage: ProcessedMessage = try Message.processRawReceivedMessageAsNotification(db, envelope: envelope) else {
guard let processedMessage: ProcessedMessage = try Message.processRawReceivedMessageAsNotification(db, data: data) else {
self.handleFailure(for: notificationContent, error: .messageProcessing)
return
}
@ -302,8 +295,6 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
// Note that this does much more than set a flag; it will also run all deferred blocks.
Singleton.appReadiness.setAppReady()
JobRunner.enableNewSingleExecutionJobsOnly()
}
// MARK: Handle completion

@ -153,7 +153,6 @@ final class ShareNavController: UINavigationController, ShareViewDelegate {
// We don't need to use SyncPushTokensJob in the SAE.
// We don't need to use DeviceSleepManager in the SAE.
JobRunner.enableNewSingleExecutionJobsOnly()
AppVersion.sharedInstance().saeLaunchDidComplete()
showLockScreenOrMainContent()

@ -6,38 +6,25 @@ import Foundation
import Combine
import SessionUtil
import SessionUtilitiesKit
import SignalCoreKit
// MARK: - LibSession
public extension LibSession {
typealias CSNode = network_service_node
private static let desiredLogCategories: [LogCategory] = [.network]
private static var networkCache: Atomic<UnsafeMutablePointer<network_object>?> = Atomic(nil)
private static var snodeCachePath: String { "\(OWSFileSystem.appSharedDataDirectoryPath())/snodeCache" }
private static var lastPaths: Atomic<[Set<CSNode>]> = Atomic([])
private static var lastNetworkStatus: Atomic<NetworkStatus> = Atomic(.unknown)
private static var pathsChangedCallbacks: Atomic<[UUID: ([Set<CSNode>]) -> ()]> = Atomic([:])
private static var pathsChangedCallbacks: Atomic<[UUID: ([Set<CSNode>], UUID) -> ()]> = Atomic([:])
private static var networkStatusCallbacks: Atomic<[UUID: (NetworkStatus) -> ()]> = Atomic([:])
static var hasPaths: Bool { !lastPaths.wrappedValue.isEmpty }
static var pathsDescription: String { lastPaths.wrappedValue.prettifiedDescription }
enum NetworkStatus {
case unknown
case connecting
case connected
case disconnected
init(status: CONNECTION_STATUS) {
switch status {
case CONNECTION_STATUS_CONNECTING: self = .connecting
case CONNECTION_STATUS_CONNECTED: self = .connected
case CONNECTION_STATUS_DISCONNECTED: self = .disconnected
default: self = .unknown
}
}
}
typealias NodesCallback = (UnsafeMutablePointer<CSNode>?, Int) -> Void
typealias NetworkCallback = (Bool, Bool, Int16, Data?) -> Void
private class CWrapper<Callback> {
@ -81,12 +68,15 @@ public extension LibSession {
networkStatusCallbacks.mutate { $0.removeValue(forKey: callbackId) }
}
static func onPathsChanged(callback: @escaping ([Set<CSNode>]) -> ()) -> UUID {
static func onPathsChanged(skipInitialCallbackIfEmpty: Bool = false, callback: @escaping ([Set<CSNode>], UUID) -> ()) -> UUID {
let callbackId: UUID = UUID()
pathsChangedCallbacks.mutate { $0[callbackId] = callback }
// Trigger the callback immediately with the most recent status
callback(lastPaths.wrappedValue)
let lastPaths: [Set<CSNode>] = self.lastPaths.wrappedValue
if !lastPaths.isEmpty || !skipInitialCallbackIfEmpty {
callback(lastPaths, callbackId)
}
return callbackId
}
@ -99,13 +89,27 @@ public extension LibSession {
static func addNetworkLogger() {
getOrCreateNetwork().first().sinkUntilComplete(receiveValue: { network in
network_add_logger(network, { logPtr, msgLen in
guard let log: String = String(pointer: logPtr, length: msgLen, encoding: .utf8) else {
print("[quic:info] Null log")
return
network_add_logger(network, { lvl, namePtr, nameLen, msgPtr, msgLen in
guard
LibSession.desiredLogCategories.contains(LogCategory(namePtr, nameLen)),
let msg: String = String(pointer: msgPtr, length: msgLen, encoding: .utf8)
else { return }
let trimmedLog: String = msg.trimmingCharacters(in: .whitespacesAndNewlines)
switch lvl {
case LOG_LEVEL_TRACE: OWSLogger.verbose(trimmedLog)
case LOG_LEVEL_DEBUG: OWSLogger.debug(trimmedLog)
case LOG_LEVEL_INFO: OWSLogger.info(trimmedLog)
case LOG_LEVEL_WARN: OWSLogger.warn(trimmedLog)
case LOG_LEVEL_ERROR: OWSLogger.error(trimmedLog)
case LOG_LEVEL_CRITICAL: OWSLogger.error(trimmedLog)
case LOG_LEVEL_OFF: break
default: break
}
print(log.trimmingCharacters(in: .whitespacesAndNewlines))
#if DEBUG
print(trimmedLog)
#endif
})
})
}
@ -175,6 +179,7 @@ public extension LibSession {
to destination: OnionRequestAPIDestination,
body: T?,
swarmPublicKey: String?,
timeout: TimeInterval,
using dependencies: Dependencies
) -> AnyPublisher<(ResponseInfoType, Data?), Error> {
return getOrCreateNetwork()
@ -218,6 +223,7 @@ public extension LibSession {
cPayloadBytes,
cPayloadBytes.count,
cSwarmPublicKey,
Int64(floor(timeout * 1000)),
{ success, timeout, statusCode, dataPtr, dataLen, ctx in
let data: Data? = dataPtr.map { Data(bytes: $0, count: dataLen) }
Unmanaged<CWrapper<NetworkCallback>>.fromOpaque(ctx!).takeRetainedValue()
@ -282,6 +288,7 @@ public extension LibSession {
cServerDestination,
cPayloadBytes,
cPayloadBytes.count,
Int64(floor(timeout * 1000)),
{ success, timeout, statusCode, dataPtr, dataLen, ctx in
let data: Data? = dataPtr.map { Data(bytes: $0, count: dataLen) }
Unmanaged<CWrapper<NetworkCallback>>.fromOpaque(ctx!).takeRetainedValue()
@ -349,6 +356,7 @@ public extension LibSession {
private static func updateNetworkStatus(cStatus: CONNECTION_STATUS) {
let status: NetworkStatus = NetworkStatus(status: cStatus)
SNLog("Network status changed to: \(status)")
lastNetworkStatus.mutate { lastNetworkStatus in
lastNetworkStatus = status
@ -381,8 +389,8 @@ public extension LibSession {
lastPaths.mutate { lastPaths in
lastPaths = paths
pathsChangedCallbacks.wrappedValue.forEach { _, callback in
callback(paths)
pathsChangedCallbacks.wrappedValue.forEach { id, callback in
callback(paths, id)
}
}
}
@ -417,9 +425,52 @@ public extension LibSession {
case (421, _): return SnodeAPIError.unassociatedPubkey
case (429, _): return SnodeAPIError.rateLimited
case (500, _), (502, _), (503, _): return SnodeAPIError.unreachable
case (500, _), (502, _), (503, _): return SnodeAPIError.internalServerError
case (_, .none): return NetworkError.unknown
case (_, .some(let responseString)): return NetworkError.requestFailed(error: responseString, rawData: data)
case (_, .some(let responseString)):
// An internal server error could return HTML data, so this is an attempt to intercept that case
guard !responseString.starts(with: "500 Internal Server Error") else {
return SnodeAPIError.internalServerError
}
return NetworkError.requestFailed(error: responseString, rawData: data)
}
}
}
// MARK: - NetworkStatus
extension LibSession {
public enum NetworkStatus {
case unknown
case connecting
case connected
case disconnected
init(status: CONNECTION_STATUS) {
switch status {
case CONNECTION_STATUS_CONNECTING: self = .connecting
case CONNECTION_STATUS_CONNECTED: self = .connected
case CONNECTION_STATUS_DISCONNECTED: self = .disconnected
default: self = .unknown
}
}
}
}
// MARK: - LogCategory
extension LibSession {
enum LogCategory: String {
case quic
case network
case unknown
init(_ namePtr: UnsafePointer<CChar>?, _ nameLen: Int) {
switch String(pointer: namePtr, length: nameLen, encoding: .utf8).map({ LogCategory(rawValue: $0) }) {
case .some(let cat): self = cat
case .none: self = .unknown
}
}
}
}
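
One small unit note on the new 'timeout' parameter threaded through the onion request calls above: the Swift layer works in seconds ('TimeInterval') while the libSession call appears to take milliseconds, hence the 'Int64(floor(timeout * 1000))' conversion. For example (value assumed):

    import Foundation

    let timeout: TimeInterval = 10                        // 10 seconds at the Swift layer
    let timeoutMs: Int64 = Int64(floor(timeout * 1000))   // 10_000ms handed to the C call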

@ -4,8 +4,6 @@
import Foundation
import Combine
import CryptoKit
import GRDB
import SessionUtilitiesKit
public extension Network.RequestType {
@ -26,6 +24,7 @@ public extension Network.RequestType {
to: OnionRequestAPIDestination.snode(snode),
body: payload,
swarmPublicKey: swarmPublicKey,
timeout: timeout,
using: dependencies
)
}
@ -61,28 +60,9 @@ public extension Network.RequestType {
),
body: request.httpBody,
swarmPublicKey: nil,
timeout: timeout,
using: dependencies
)
}
}
}
/// See the "Onion Requests" section of [The Session Whitepaper](https://arxiv.org/pdf/2002.04609.pdf) for more information.
public enum OnionRequestAPI {
// MARK: - Private API
fileprivate static func sendOnionRequest(
with body: Data?,
to destination: OnionRequestAPIDestination,
swarmPublicKey: String?,
timeout: TimeInterval,
using dependencies: Dependencies
) -> AnyPublisher<(ResponseInfoType, Data?), Error> {
return LibSession.sendOnionRequest(
to: destination,
body: body,
swarmPublicKey: swarmPublicKey,
using: dependencies
)
}
}

@ -32,7 +32,7 @@ public enum SnodeAPIError: Error, CustomStringConvertible {
case invalidNetwork
case invalidPayload
case missingSecretKey
case unreachable
case internalServerError
case unassociatedPubkey
case unableToRetrieveSwarm
@ -70,7 +70,7 @@ public enum SnodeAPIError: Error, CustomStringConvertible {
case .invalidNetwork: return "Unable to create network (SnodeAPIError.invalidNetwork)."
case .invalidPayload: return "Invalid payload (SnodeAPIError.invalidPayload)."
case .missingSecretKey: return "Missing secret key (SnodeAPIError.missingSecretKey)."
case .unreachable: return "The service node is unreachable (SnodeAPIError.unreachable)."
case .internalServerError: return "The service node returned an internal server error (SnodeAPIError.internalServerError)."
case .unassociatedPubkey: return "The service node is no longer associated with the public key (SnodeAPIError.unassociatedPubkey)."
case .unableToRetrieveSwarm: return "Unable to retrieve the swarm for the given public key (SnodeAPIError.unableToRetrieveSwarm)."
}

@ -5,7 +5,7 @@
import Foundation
public extension SnodeAPI {
enum Namespace: Int, Codable, Hashable {
enum Namespace: Int, Codable, Hashable, CustomStringConvertible {
case `default` = 0
case configUserProfile = 2
@ -16,6 +16,10 @@ public extension SnodeAPI {
case legacyClosedGroup = -10
/// This is used when we somehow receive a message from an unknown namespace (shouldn't really be possible)
case unknown = -9999989
/// This is a convenience namespace used to represent all other namespaces for specific API calls
case all = -9999990
// MARK: Variables
@ -51,7 +55,7 @@ public extension SnodeAPI {
case .configUserProfile, .configContacts,
.configConvoInfoVolatile, .configUserGroups,
.configClosedGroupInfo, .all:
.configClosedGroupInfo, .unknown, .all:
return false
}
}
@ -87,7 +91,7 @@ public extension SnodeAPI {
case .configUserProfile, .configContacts,
.configConvoInfoVolatile, .configUserGroups,
.configClosedGroupInfo, .all:
.configClosedGroupInfo, .unknown, .all:
return 1
}
}
@ -110,5 +114,22 @@ public extension SnodeAPI {
result[next.namespace] = -next.maxSize
}
}
// MARK: - CustomStringConvertible
public var description: String {
switch self {
case .`default`: return "default"
case .configUserProfile: return "configUserProfile"
case .configContacts: return "configContacts"
case .configConvoInfoVolatile: return "configConvoInfoVolatile"
case .configUserGroups: return "configUserGroups"
case .configClosedGroupInfo: return "configClosedGroupInfo"
case .legacyClosedGroup: return "legacyClosedGroup"
case .unknown: return "unknown"
case .all: return "all"
}
}
}
}

@ -148,10 +148,6 @@ public struct Job: Codable, Equatable, Hashable, Identifiable, FetchableRecord,
/// This job will run once each time the app becomes active (launch and return from background) and
/// may run again during the same session if `nextRunTimestamp` gets set
case recurringOnActive
/// This job will run once and, while it does get persisted to the database, upon subsequent launches jobs with
/// this behaviour will not be run and will be cleared from the database
case runOnceTransient
}
/// The `id` value is auto incremented by the database, if the `Job` hasn't been inserted into

@ -10,11 +10,6 @@ public extension Array where Element: CustomStringConvertible {
}
}
@inlinable public func zip<Sequence1, Sequence2, Sequence3>(_ sequence1: Sequence1, _ sequence2: Sequence2, _ sequence3: Sequence3) -> Array<(Sequence1.Element, Sequence2.Element, Sequence3.Element)> where Sequence1: Sequence, Sequence2: Sequence, Sequence3: Sequence {
return zip(zip(sequence1, sequence2), sequence3)
.map { firstZip, third -> (Sequence1.Element, Sequence2.Element, Sequence3.Element) in (firstZip.0, firstZip.1, third) }
}
public extension Array {
func appending(_ other: Element?) -> [Element] {
guard let other: Element = other else { return self }

@ -20,7 +20,6 @@ public protocol JobRunnerType {
func appDidFinishLaunching(using dependencies: Dependencies)
func appDidBecomeActive(using dependencies: Dependencies)
func startNonBlockingQueues(using dependencies: Dependencies)
func enableNewSingleExecutionJobsOnly(using dependencies: Dependencies)
func stopAndClearPendingJobs(exceptForVariant: Job.Variant?, using dependencies: Dependencies, onComplete: (() -> ())?)
// MARK: - Job Scheduling
@ -205,7 +204,6 @@ public final class JobRunner: JobRunnerType {
internal var appReadyToStartQueues: Atomic<Bool> = Atomic(false)
internal var appHasBecomeActive: Atomic<Bool> = Atomic(false)
internal var forceAllowSingleExecutionJobs: Atomic<Bool> = Atomic(false)
internal var perSessionJobsCompleted: Atomic<Set<Int64>> = Atomic([])
internal var hasCompletedInitialBecomeActive: Atomic<Bool> = Atomic(false)
internal var shutdownBackgroundTask: Atomic<OWSBackgroundTask?> = Atomic(nil)
@ -230,6 +228,7 @@ public final class JobRunner: JobRunnerType {
self.allowToExecuteJobs = (
isTestingJobRunner || (
Singleton.hasAppContext &&
Singleton.appContext.isMainApp &&
!SNUtilitiesKit.isRunningTests
)
)
@ -322,7 +321,6 @@ public final class JobRunner: JobRunnerType {
// Now that we've finished setting up the JobRunner, update the queue closures
self.blockingQueue.mutate {
$0?.canStart = { [weak self] queue -> Bool in (self?.canStart(queue: queue) == true) }
$0?.canStartPendingJobs = { [weak self] queue -> Bool in (self?.canStartPendingJobs(queue: queue) == true) }
$0?.onQueueDrained = { [weak self] in
// Once all blocking jobs have been completed we want to start running
// the remaining job queues
@ -338,9 +336,6 @@ public final class JobRunner: JobRunnerType {
self.queues.mutate {
$0.values.forEach { queue in
queue.canStart = { [weak self] targetQueue -> Bool in (self?.canStart(queue: targetQueue) == true) }
queue.canStartPendingJobs = { [weak self] targetQueue -> Bool in
(self?.canStartPendingJobs(queue: targetQueue) == true)
}
}
}
}
@ -353,19 +348,6 @@ public final class JobRunner: JobRunnerType {
}
public func canStart(queue: JobQueue?) -> Bool {
return (
allowToExecuteJobs && (
forceAllowSingleExecutionJobs.wrappedValue || (
appReadyToStartQueues.wrappedValue && (
queue?.type == .blocking ||
canStartNonBlockingQueue
)
)
)
)
}
public func canStartPendingJobs(queue: JobQueue?) -> Bool {
return (
allowToExecuteJobs &&
appReadyToStartQueues.wrappedValue && (
@ -465,17 +447,6 @@ public final class JobRunner: JobRunnerType {
}
public func appDidFinishLaunching(using dependencies: Dependencies) {
// Clear any 'runOnceTransient' entries in the database (they should only ever be run during
// the app session that they were scheduled in)
//
// Note: If we are already in "single-execution mode" then don't do this as there could be running
// jobs (this case occurs during Onboarding when trying to retrieve the existing profile name)
if !forceAllowSingleExecutionJobs.wrappedValue {
dependencies.storage.writeAsync { db in
try Job.filter(Job.Columns.behaviour == Job.Behaviour.runOnceTransient).deleteAll(db)
}
}
// Flag that the JobRunner can start its queues
appReadyToStartQueues.mutate { $0 = true }
@ -539,7 +510,6 @@ public final class JobRunner: JobRunnerType {
// Flag that the JobRunner can start its queues and start queueing non-launch jobs
appReadyToStartQueues.mutate { $0 = true }
appHasBecomeActive.mutate { $0 = true }
forceAllowSingleExecutionJobs.mutate { $0 = false }
// If we have a running "shutdownBackgroundTask" then we want to cancel it as otherwise it
// can result in the database being suspended and us being unable to interact with it at all
@ -603,27 +573,6 @@ public final class JobRunner: JobRunnerType {
}
}
public func enableNewSingleExecutionJobsOnly(using dependencies: Dependencies) {
// If we have already fully started the JobRunner then don't bother doing this (this shouldn't
// currently be possible but might be in the future and swapping this flag while the JobRunner
// is in its "normal" mode could result in unexpected behaviour)
guard !appReadyToStartQueues.wrappedValue else { return }
// Clear any 'runOnceTransient' entries in the database (they should only ever be run during
// the app session that they were scheduled in)
dependencies.storage.writeAsync { db in
try Job.filter(Job.Columns.behaviour == Job.Behaviour.runOnceTransient).deleteAll(db)
}
// This function is called by the app extensions to allow them to run jobs directly without
// triggering any recurring or pending jobs
//
// Note: This will only allow jobs to run if they are directly added to a job queue as if
// `canStartPendingJobs` returns `false` then any persisted jobs **WILL NOT** be fetched and
// added to the queue
forceAllowSingleExecutionJobs.mutate { $0 = true }
}
public func stopAndClearPendingJobs(
exceptForVariant: Job.Variant?,
using dependencies: Dependencies,
@ -859,10 +808,6 @@ public final class JobRunner: JobRunnerType {
return (
job.behaviour == .runOnceNextLaunch ||
job.behaviour == .recurringOnLaunch ||
(
job.behaviour == .runOnceTransient &&
forceAllowSingleExecutionJobs.wrappedValue
) ||
appHasBecomeActive.wrappedValue
)
}
@ -1014,7 +959,6 @@ public final class JobQueue: Hashable {
private var executorMap: Atomic<[Job.Variant: JobExecutor.Type]> = Atomic([:])
fileprivate var canStart: ((JobQueue?) -> Bool)?
fileprivate var canStartPendingJobs: ((JobQueue?) -> Bool)?
fileprivate var onQueueDrained: (() -> ())?
fileprivate var hasStartedAtLeastOnce: Atomic<Bool> = Atomic(false)
fileprivate var isRunning: Atomic<Bool> = Atomic(false)
@ -1319,25 +1263,22 @@ public final class JobQueue: Hashable {
hasStartedAtLeastOnce.mutate { $0 = true }
// Get any pending jobs
var jobsToRun: [Job] = []
let jobIdsAlreadyRunning: Set<Int64> = currentlyRunningJobIds.wrappedValue
if canStartPendingJobs?(self) == true {
let jobVariants: [Job.Variant] = self.jobVariants
let jobsAlreadyInQueue: Set<Int64> = pendingJobsQueue.wrappedValue.compactMap { $0.id }.asSet()
jobsToRun = dependencies.storage.read(using: dependencies) { db in
try Job
.filterPendingJobs(
variants: jobVariants,
excludeFutureJobs: true,
includeJobsWithDependencies: false
)
.filter(!jobIdsAlreadyRunning.contains(Job.Columns.id)) // Exclude jobs already running
.filter(!jobsAlreadyInQueue.contains(Job.Columns.id)) // Exclude jobs already in the queue
.fetchAll(db)
}
.defaulting(to: [])
let jobVariants: [Job.Variant] = self.jobVariants
let jobIdsAlreadyRunning: Set<Int64> = currentlyRunningJobIds.wrappedValue
let jobsAlreadyInQueue: Set<Int64> = pendingJobsQueue.wrappedValue.compactMap { $0.id }.asSet()
let jobsToRun: [Job] = dependencies.storage.read(using: dependencies) { db in
try Job
.filterPendingJobs(
variants: jobVariants,
excludeFutureJobs: true,
includeJobsWithDependencies: false
)
.filter(!jobIdsAlreadyRunning.contains(Job.Columns.id)) // Exclude jobs already running
.filter(!jobsAlreadyInQueue.contains(Job.Columns.id)) // Exclude jobs already in the queue
.fetchAll(db)
}
.defaulting(to: [])
// Determine the number of jobs to run
var jobCount: Int = 0
@ -1524,14 +1465,6 @@ public final class JobQueue: Hashable {
}
private func scheduleNextSoonestJob(using dependencies: Dependencies) {
// If we can't schedule pending jobs then complete the queue
guard canStartPendingJobs?(self) == true else {
if executionType != .concurrent || currentlyRunningJobIds.wrappedValue.isEmpty {
self.onQueueDrained?()
}
return
}
// Retrieve any pending jobs from the database
let jobVariants: [Job.Variant] = self.jobVariants
let jobIdsAlreadyRunning: Set<Int64> = currentlyRunningJobIds.wrappedValue
@ -1914,10 +1847,6 @@ public extension JobRunner {
instance.appDidBecomeActive(using: dependencies)
}
static func enableNewSingleExecutionJobsOnly(using dependencies: Dependencies = Dependencies()) {
instance.enableNewSingleExecutionJobsOnly(using: dependencies)
}
static func afterBlockingQueue(callback: @escaping () -> ()) {
instance.afterBlockingQueue(callback: callback)
}

@ -1736,110 +1736,6 @@ class JobRunnerSpec: QuickSpec {
}
}
}
// MARK: ---- when running in single execution mode
context("when running in single execution mode") {
beforeEach {
jobRunner.enableNewSingleExecutionJobsOnly(using: dependencies)
}
// MARK: ------ starts the job if it has the run once transient behaviour
it("starts the job if it has the run once transient behaviour") {
job1 = Job(
id: 101,
failureCount: 0,
variant: .messageSend,
behaviour: .runOnceTransient,
shouldBlock: false,
shouldBeUnique: false,
shouldSkipLaunchBecomeActive: false,
nextRunTimestamp: 0,
threadId: nil,
interactionId: nil,
details: try? JSONEncoder()
.with(outputFormatting: .sortedKeys)
.encode(TestDetails(result: .success, completeTime: 1))
)
mockStorage.write { db in
try job1.insert(db)
jobRunner.upsert(db, job: job1, canStartJob: true, using: dependencies)
}
// Make sure the job is run
expect(Array(jobRunner.jobInfoFor(state: .running).keys)).to(equal([101]))
// Make sure there are no running jobs
dependencies.stepForwardInTime()
expect(Array(jobRunner.jobInfoFor(state: .running).keys)).to(beEmpty())
}
// MARK: ------ does not start the job if it does not have the run once transient behaviour
it("does not start the job if it does not have the run once transient behaviour") {
job1 = Job(
id: 101,
failureCount: 0,
variant: .messageSend,
behaviour: .runOnce,
shouldBlock: false,
shouldBeUnique: false,
shouldSkipLaunchBecomeActive: false,
nextRunTimestamp: 0,
threadId: nil,
interactionId: nil,
details: try? JSONEncoder()
.with(outputFormatting: .sortedKeys)
.encode(TestDetails(result: .success, completeTime: 1))
)
mockStorage.write { db in
try job1.insert(db)
jobRunner.upsert(db, job: job1, canStartJob: true, using: dependencies)
}
// Make sure the job does not run
expect(Array(jobRunner.jobInfoFor(state: .running).keys)).to(beEmpty())
}
// MARK: ------ after the app properly launches
context("after the app properly launches") {
beforeEach {
jobRunner.appDidFinishLaunching(using: dependencies)
jobRunner.appDidBecomeActive(using: dependencies)
}
// MARK: -------- is able to start jobs without the run once transient behaviour again
it("is able to start jobs without the run once transient behaviour again") {
job1 = Job(
id: 101,
failureCount: 0,
variant: .messageSend,
behaviour: .runOnce,
shouldBlock: false,
shouldBeUnique: false,
shouldSkipLaunchBecomeActive: false,
nextRunTimestamp: 0,
threadId: nil,
interactionId: nil,
details: try? JSONEncoder()
.with(outputFormatting: .sortedKeys)
.encode(TestDetails(result: .success, completeTime: 1))
)
mockStorage.write { db in
try job1.insert(db)
jobRunner.upsert(db, job: job1, canStartJob: true, using: dependencies)
expect(Array(jobRunner.jobInfoFor(state: .running).keys)).to(beEmpty())
}
expect(Array(jobRunner.jobInfoFor(state: .running).keys)).to(equal([101]))
}
}
}
}
}
}

@ -30,7 +30,6 @@ class MockJobRunner: Mock<JobRunnerType>, JobRunnerType {
func appDidFinishLaunching(using dependencies: Dependencies) {}
func appDidBecomeActive(using dependencies: Dependencies) {}
func enableNewSingleExecutionJobsOnly(using dependencies: Dependencies) {}
func startNonBlockingQueues(using dependencies: Dependencies) {}
func stopAndClearPendingJobs(exceptForVariant: Job.Variant?, onComplete: (() -> ())?) {
