// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
//
// stringlint:disable
import Foundation
import Combine
import GRDB
import SessionSnodeKit
import SessionUtilitiesKit
public class Poller {
    /// The aggregate result of a single poll: the messages which were successfully processed, the raw number of
    /// messages received, the number which were valid (ie. not duplicates) and whether a duplicate message
    /// resulted in a "last hash" update
    public typealias PollResponse = (
        messages: [ProcessedMessage],
        rawMessageCount: Int,
        validMessageCount: Int,
        hadValidHashUpdate: Bool
    )
    
    /// How the poller should react after a poll fails (returned from `handlePollError`)
    internal enum PollerErrorResponse {
        case stopPolling
        case continuePolling
        case continuePollingInfo(String)
    }
    
    /// Active poll subscriptions keyed by swarm public key; cancelling one terminates that poll loop
    private var cancellables: Atomic<[String: AnyCancellable]> = Atomic([:])
    
    /// Per-public-key polling state (all keyed by swarm public key)
    internal var isPolling: Atomic<[String: Bool]> = Atomic([:])
    internal var pollCount: Atomic<[String: Int]> = Atomic([:])
    internal var failureCount: Atomic<[String: Int]> = Atomic([:])
    internal var drainBehaviour: Atomic<[String: Atomic<SwarmDrainBehaviour>]> = Atomic([:])
// MARK: - S e t t i n g s
// / T h e n a m e s p a c e s w h i c h t h i s p o l l e r q u e r i e s
internal var namespaces : [ SnodeAPI . Namespace ] {
preconditionFailure ( " abstract class - override in subclass " )
}
// / T h e q u e u e t h i s p o l l e r s h o u l d r u n o n
internal var pollerQueue : DispatchQueue {
preconditionFailure ( " abstract class - override in subclass " )
}
// / T h e b e h a v i o u r f o r h o w t h e p o l l e r s h o u l d d r a i n i t ' s s w a r m w h e n p o l l i n g
internal var pollDrainBehaviour : SwarmDrainBehaviour {
preconditionFailure ( " abstract class - override in subclass " )
}
// MARK: - P u b l i c A P I
public init ( ) { }
public func stopAllPollers ( ) {
let pollers : [ String ] = Array ( isPolling . wrappedValue . keys )
pollers . forEach { groupPublicKey in
self . stopPolling ( for : groupPublicKey )
}
}
public func stopPolling ( for publicKey : String ) {
isPolling . mutate { $0 [ publicKey ] = false }
failureCount . mutate { $0 [ publicKey ] = nil }
drainBehaviour . mutate { $0 [ publicKey ] = nil }
cancellables . mutate { $0 [ publicKey ] ? . cancel ( ) }
}
// MARK: - A b s t r a c t M e t h o d s
// / T h e n a m e f o r t h i s p o l l e r t o a p p e a r i n t h e l o g s
public func pollerName ( for publicKey : String ) -> String {
preconditionFailure ( " abstract class - override in subclass " )
}
// / C a l c u l a t e t h e d e l a y w h i c h s h o u l d o c c u r b e f o r e t h e n e x t p o l l
internal func nextPollDelay ( for publicKey : String , using dependencies : Dependencies ) -> TimeInterval {
preconditionFailure ( " abstract class - override in subclass " )
}
// / P e r f o r m a n d l o g i c w h i c h s h o u l d o c c u r w h e n t h e p o l l e r r o r s , w i l l s t o p p o l l i n g i f ` f a l s e ` i s r e t u r n e d
internal func handlePollError ( _ error : Error , for publicKey : String , using dependencies : Dependencies ) -> PollerErrorResponse {
preconditionFailure ( " abstract class - override in subclass " )
}
// MARK: - P r i v a t e A P I
internal func startIfNeeded ( for publicKey : String , using dependencies : Dependencies ) {
// R u n o n t h e ' p o l l e r Q u e u e ' t o e n s u r e a n y ' A t o m i c ' a c c e s s d o e s n ' t b l o c k t h e m a i n t h r e a d
// o n s t a r t u p
let drainBehaviour : Atomic < SwarmDrainBehaviour > = Atomic ( pollDrainBehaviour )
pollerQueue . async { [ weak self ] in
guard self ? . isPolling . wrappedValue [ publicKey ] != true else { return }
// M i g h t b e a r a c e c o n d i t i o n t h a t t h e s e t U p P o l l i n g f i n i s h e s t o o s o o n ,
// a n d t h e t i m e r i s n o t c r e a t e d , i f w e m a r k t h e g r o u p a s i s p o l l i n g
// a f t e r s e t U p P o l l i n g . S o t h e p o l l e r m a y n o t w o r k , t h u s m i s s e s m e s s a g e s
self ? . isPolling . mutate { $0 [ publicKey ] = true }
self ? . drainBehaviour . mutate { $0 [ publicKey ] = drainBehaviour }
self ? . pollRecursively ( for : publicKey , drainBehaviour : drainBehaviour , using : dependencies )
}
}
private func pollRecursively (
for swarmPublicKey : String ,
drainBehaviour : Atomic < SwarmDrainBehaviour > ,
using dependencies : Dependencies
) {
guard isPolling . wrappedValue [ swarmPublicKey ] = = true else { return }
let pollerName : String = pollerName ( for : swarmPublicKey )
let namespaces : [ SnodeAPI . Namespace ] = self . namespaces
let pollerQueue : DispatchQueue = self . pollerQueue
let lastPollStart : TimeInterval = dependencies . dateNow . timeIntervalSince1970
let fallbackPollDelay : TimeInterval = self . nextPollDelay ( for : swarmPublicKey , using : dependencies )
// S t o r e t h e p u b l i s h e r i n t p t h e c a n c e l l a b l e s d i c t i o n a r y
cancellables . mutate { [ weak self ] cancellables in
cancellables [ swarmPublicKey ] = self ? . poll (
namespaces : namespaces ,
for : swarmPublicKey ,
drainBehaviour : drainBehaviour ,
using : dependencies
)
. subscribe ( on : pollerQueue , using : dependencies )
. receive ( on : pollerQueue , using : dependencies )
// FIXME: I n i O S 1 4 . 0 a ` f l a t M a p ` w a s a d d e d w h e r e t h e e r r o r t y p e i n ` N e v e r ` , w e s h o u l d u s e t h a t h e r e
. map { response -> Result < PollResponse , Error > in Result . success ( response ) }
. catch { error -> AnyPublisher < Result < PollResponse , Error > , Error > in
Just ( Result . failure ( error ) ) . setFailureType ( to : Error . self ) . eraseToAnyPublisher ( )
}
. sink (
receiveCompletion : { _ in } , // N e v e r c a l l e d
receiveValue : { result in
// I f t h e p o l l i n g h a s b e e n c a n c e l l e d t h e n d o n ' t c o n t i n u e
guard self ? . isPolling . wrappedValue [ swarmPublicKey ] = = true else { return }
// I n c r e m e n t o r r e s e t t h e f a i l u r e C o u n t
let failureCount : Int
switch result {
case . failure :
failureCount = ( self ? . failureCount
. mutate {
let updatedFailureCount : Int = ( ( $0 [ swarmPublicKey ] ? ? 0 ) + 1 )
$0 [ swarmPublicKey ] = updatedFailureCount
return updatedFailureCount
} )
. defaulting ( to : - 1 )
case . success :
failureCount = 0
self ? . failureCount . mutate { $0 . removeValue ( forKey : swarmPublicKey ) }
}
// L o g i n f o r m a t i o n a b o u t t h e p o l l
let endTime : TimeInterval = dependencies . dateNow . timeIntervalSince1970
let duration : TimeUnit = . seconds ( endTime - lastPollStart )
let nextPollInterval : TimeUnit = . seconds ( ( self ? . nextPollDelay ( for : swarmPublicKey , using : dependencies ) )
. defaulting ( to : fallbackPollDelay ) )
switch result {
case . failure ( let error ) :
// D e t e r m i n e i f t h e e r r o r s h o u l d s t o p u s f r o m p o l l i n g a n y m o r e
switch self ? . handlePollError ( error , for : swarmPublicKey , using : dependencies ) {
case . stopPolling : return
case . continuePollingInfo ( let info ) :
Log . error ( " \( pollerName ) failed to process any messages after \( duration , unit : . s ) due to error: \( error ) . \( info ) . Setting failure count to \( failureCount ) . Next poll in \( nextPollInterval , unit : . s ) . " )
case . continuePolling , . none :
Log . error ( " \( pollerName ) failed to process any messages after \( duration , unit : . s ) due to error: \( error ) . Setting failure count to \( failureCount ) . Next poll in \( nextPollInterval , unit : . s ) . " )
}
case . success ( let response ) :
switch ( response . rawMessageCount , response . validMessageCount , response . hadValidHashUpdate ) {
case ( 0 , _ , _ ) :
Log . info ( " Received no new messages in \( pollerName ) after \( duration , unit : . s ) . Next poll in \( nextPollInterval , unit : . s ) . " )
case ( _ , 0 , false ) :
Log . info ( " Received \( response . rawMessageCount ) new message(s) in \( pollerName ) after \( duration , unit : . s ) , all duplicates - marked the hash we polled with as invalid. Next poll in \( nextPollInterval , unit : . s ) . " )
default :
Log . info ( " Received \( response . validMessageCount ) new message(s) in \( pollerName ) after \( duration , unit : . s ) (duplicates: \( response . rawMessageCount - response . validMessageCount ) ). Next poll in \( nextPollInterval , unit : . s ) . " )
}
}
// S c h e d u l e t h e n e x t p o l l
pollerQueue . asyncAfter ( deadline : . now ( ) + . milliseconds ( Int ( nextPollInterval . timeInterval * 1000 ) ) , qos : . default , using : dependencies ) {
self ? . pollRecursively ( for : swarmPublicKey , drainBehaviour : drainBehaviour , using : dependencies )
}
}
)
}
}
// / P o l l s t h e s p e c i f i e d n a m e s p a c e s a n d p r o c e s s e s a n y m e s s a g e s , r e t u r n i n g a n a r r a y o f m e s s a g e s t h a t w e r e
// / s u c c e s s f u l l y p r o c e s s e d
// /
// / * * N o t e : * * T h e r e t u r n e d m e s s a g e s w i l l h a v e a l r e a d y b e e n p r o c e s s e d b y t h e ` P o l l e r ` , t h e y a r e o n l y r e t u r n e d
// / f o r c a s e s w h e r e w e n e e d e x p l i c i t / c u s t o m b e h a v i o u r s t o o c c u r ( e g . O n b o a r d i n g )
public func poll (
namespaces : [ SnodeAPI . Namespace ] ,
for swarmPublicKey : String ,
calledFromBackgroundPoller : Bool = false ,
isBackgroundPollValid : @ escaping ( ( ) -> Bool ) = { true } ,
drainBehaviour : Atomic < SwarmDrainBehaviour > ,
using dependencies : Dependencies
) -> AnyPublisher < PollResponse , Error > {
// I f t h e p o l l i n g h a s b e e n c a n c e l l e d t h e n d o n ' t c o n t i n u e
guard
( calledFromBackgroundPoller && isBackgroundPollValid ( ) ) ||
isPolling . wrappedValue [ swarmPublicKey ] = = true
else {
return Just ( ( [ ] , 0 , 0 , false ) )
. setFailureType ( to : Error . self )
. eraseToAnyPublisher ( )
}
let pollerQueue : DispatchQueue = self . pollerQueue
let configHashes : [ String ] = LibSession . configHashes ( for : swarmPublicKey , using : dependencies )
// F e t c h t h e m e s s a g e s
return LibSession . getSwarm ( swarmPublicKey : swarmPublicKey )
. tryFlatMapWithRandomSnode ( drainBehaviour : drainBehaviour , using : dependencies ) { snode -> AnyPublisher < [ SnodeAPI . Namespace : ( info : ResponseInfoType , data : ( messages : [ SnodeReceivedMessage ] , lastHash : String ? ) ? ) ] , Error > in
SnodeAPI . poll (
namespaces : namespaces ,
refreshingConfigHashes : configHashes ,
from : snode ,
swarmPublicKey : swarmPublicKey ,
calledFromBackgroundPoller : calledFromBackgroundPoller ,
isBackgroundPollValid : isBackgroundPollValid ,
using : dependencies
)
}
. flatMap { [ weak self ] namespacedResults -> AnyPublisher < PollResponse , Error > in
guard
( calledFromBackgroundPoller && isBackgroundPollValid ( ) ) ||
self ? . isPolling . wrappedValue [ swarmPublicKey ] = = true
else {
return Just ( ( [ ] , 0 , 0 , false ) )
. setFailureType ( to : Error . self )
. eraseToAnyPublisher ( )
}
// G e t a l l o f t h e m e s s a g e s a n d s o r t t h e m b y t h e i r r e q u i r e d ' p r o c e s s i n g O r d e r '
let sortedMessages : [ ( namespace : SnodeAPI . Namespace , messages : [ SnodeReceivedMessage ] ) ] = namespacedResults
. compactMap { namespace , result in ( result . data ? . messages ) . map { ( namespace , $0 ) } }
. sorted { lhs , rhs in lhs . namespace . processingOrder < rhs . namespace . processingOrder }
let rawMessageCount : Int = sortedMessages . map { $0 . messages . count } . reduce ( 0 , + )
// N o n e e d t o d o a n y t h i n g i f t h e r e a r e n o m e s s a g e s
guard rawMessageCount > 0 else {
return Just ( ( [ ] , 0 , 0 , false ) )
. setFailureType ( to : Error . self )
. eraseToAnyPublisher ( )
}
// O t h e r w i s e p r o c e s s t h e m e s s a g e s a n d a d d t h e m t o t h e q u e u e f o r h a n d l i n g
let lastHashes : [ String ] = namespacedResults
. compactMap { $0 . value . data ? . lastHash }
let otherKnownHashes : [ String ] = namespacedResults
. filter { $0 . key . shouldFetchSinceLastHash }
. compactMap { $0 . value . data ? . messages . map { $0 . info . hash } }
. reduce ( [ ] , + )
var messageCount : Int = 0
var processedMessages : [ ProcessedMessage ] = [ ]
var hadValidHashUpdate : Bool = false
var configMessageJobsToRun : [ Job ] = [ ]
var standardMessageJobsToRun : [ Job ] = [ ]
dependencies . storage . write { db in
let allProcessedMessages : [ ProcessedMessage ] = sortedMessages
. compactMap { namespace , messages -> [ ProcessedMessage ] ? in
let processedMessages : [ ProcessedMessage ] = messages
. compactMap { message -> ProcessedMessage ? in
do {
return try Message . processRawReceivedMessage (
db ,
rawMessage : message ,
publicKey : swarmPublicKey ,
using : dependencies
)
}
catch {
switch error {
// / I g n o r e d u p l i c a t e & s e l f S e n d m e s s a g e e r r o r s ( a n d d o n ' t b o t h e r l o g g i n g t h e m a s t h e r e
// / w i l l b e a l o t s i n c e w e e a c h s e r v i c e n o d e d u p l i c a t e s m e s s a g e s )
case DatabaseError . SQLITE_CONSTRAINT_UNIQUE ,
DatabaseError . SQLITE_CONSTRAINT , // / S o m e t i m e s t h r o w n f o r U N I Q U E
MessageReceiverError . duplicateMessage ,
MessageReceiverError . duplicateControlMessage ,
MessageReceiverError . selfSend :
break
case MessageReceiverError . duplicateMessageNewSnode :
hadValidHashUpdate = true
break
case DatabaseError . SQLITE_ABORT :
// / I n t h e b a c k g r o u n d i g n o r e ' S Q L I T E _ A B O R T ' ( i t g e n e r a l l y m e a n s t h e
// / B a c k g r o u n d P o l l e r h a s t i m e d o u t
if ! calledFromBackgroundPoller {
Log . warn ( " Failed to the database being suspended (running in background with no background task). " )
}
break
default : Log . error ( " Failed to deserialize envelope due to error: \( error ) . " )
}
return nil
}
}
// / I f t h i s m e s s a g e s h o u l d b e h a n d l e d s y n c h r o n o u s l y t h e n d o s o h e r e b e f o r e p r o c e s s i n g t h e n e x t n a m e s p a c e
guard namespace . shouldHandleSynchronously else { return processedMessages }
if namespace . isConfigNamespace {
do {
// / P r o c e s s c o n f i g m e s s a g e s a l l a t o n c e i n c a s e t h e y a r e m u l t i - p a r t m e s s a g e s
try LibSession . handleConfigMessages (
db ,
messages : ConfigMessageReceiveJob
. Details (
messages : processedMessages ,
calledFromBackgroundPoller : false
)
. messages ,
publicKey : swarmPublicKey ,
using : dependencies
)
}
catch { Log . error ( " Failed to handle processed config message due to error: \( error ) . " ) }
}
else {
// / I n d i v i d u a l l y p r o c e s s n o n - c o n f i g m e s s a g e s
processedMessages . forEach { processedMessage in
guard case . standard ( let threadId , let threadVariant , let proto , let messageInfo ) = processedMessage else {
return
}
do {
try MessageReceiver . handle (
db ,
threadId : threadId ,
threadVariant : threadVariant ,
message : messageInfo . message ,
serverExpirationTimestamp : messageInfo . serverExpirationTimestamp ,
associatedWithProto : proto ,
using : dependencies
)
}
catch { Log . error ( " Failed to handle processed message due to error: \( error ) . " ) }
}
}
return nil
}
. flatMap { $0 }
// A d d a j o b t o p r o c e s s t h e c o n f i g m e s s a g e s f i r s t
let configJobIds : [ Int64 ] = allProcessedMessages
. filter { $0 . isConfigMessage && ! $0 . namespace . shouldHandleSynchronously }
. grouped { $0 . threadId }
. compactMap { threadId , threadMessages in
messageCount += threadMessages . count
processedMessages += threadMessages
let jobToRun : Job ? = Job (
variant : . configMessageReceive ,
behaviour : . runOnce ,
threadId : threadId ,
details : ConfigMessageReceiveJob . Details (
messages : threadMessages ,
calledFromBackgroundPoller : calledFromBackgroundPoller
)
)
configMessageJobsToRun = configMessageJobsToRun . appending ( jobToRun )
// I f w e a r e f o r c e - p o l l i n g t h e n a d d t o t h e J o b R u n n e r s o t h e y a r e
// p e r s i s t e n t a n d w i l l r e t r y o n t h e n e x t a p p r u n i f t h e y f a i l b u t
// d o n ' t l e t t h e m a u t o - s t a r t
let updatedJob : Job ? = dependencies . jobRunner
. add (
db ,
job : jobToRun ,
canStartJob : ! calledFromBackgroundPoller ,
using : dependencies
)
return updatedJob ? . id
}
// A d d j o b s f o r p r o c e s s i n g n o n - c o n f i g m e s s a g e s w h i c h a r e d e p e n d a n t o n t h e c o n f i g m e s s a g e
// p r o c e s s i n g j o b s
allProcessedMessages
. filter { ! $0 . isConfigMessage && ! $0 . namespace . shouldHandleSynchronously }
. grouped { $0 . threadId }
. forEach { threadId , threadMessages in
messageCount += threadMessages . count
processedMessages += threadMessages
let jobToRun : Job ? = Job (
variant : . messageReceive ,
behaviour : . runOnce ,
threadId : threadId ,
details : MessageReceiveJob . Details (
messages : threadMessages ,
calledFromBackgroundPoller : calledFromBackgroundPoller
)
)
standardMessageJobsToRun = standardMessageJobsToRun . appending ( jobToRun )
// I f w e a r e f o r c e - p o l l i n g t h e n a d d t o t h e J o b R u n n e r s o t h e y a r e
// p e r s i s t e n t a n d w i l l r e t r y o n t h e n e x t a p p r u n i f t h e y f a i l b u t
// d o n ' t l e t t h e m a u t o - s t a r t
let updatedJob : Job ? = dependencies . jobRunner
. add (
db ,
job : jobToRun ,
canStartJob : ! calledFromBackgroundPoller ,
using : dependencies
)
// C r e a t e t h e d e p e n d e n c y b e t w e e n t h e j o b s
if let updatedJobId : Int64 = updatedJob ? . id {
do {
try configJobIds . forEach { configJobId in
try JobDependencies (
jobId : updatedJobId ,
dependantId : configJobId
)
. insert ( db )
}
}
catch {
Log . warn ( " Failed to add dependency between config processing and non-config processing messageReceive jobs. " )
}
}
}
// C l e a n u p m e s s a g e h a s h e s a n d a d d s o m e l o g s a b o u t t h e p o l l r e s u l t s
if sortedMessages . isEmpty && ! hadValidHashUpdate {
// U p d a t e t h e c a c h e d v a l i d i t y o f t h e m e s s a g e s
try SnodeReceivedMessageInfo . handlePotentialDeletedOrInvalidHash (
db ,
potentiallyInvalidHashes : lastHashes ,
otherKnownValidHashes : otherKnownHashes
)
}
}
// I f w e a r e n ' t r u n i n g i n a b a c k g r o u n d p o l l e r t h e n j u s t f i n i s h i m m e d i a t e l y
guard calledFromBackgroundPoller else {
return Just ( ( processedMessages , rawMessageCount , messageCount , hadValidHashUpdate ) )
. setFailureType ( to : Error . self )
. eraseToAnyPublisher ( )
}
// W e w a n t t o t r y t o h a n d l e t h e r e c e i v e j o b s i m m e d i a t e l y i n t h e b a c k g r o u n d
return Publishers
. MergeMany (
configMessageJobsToRun . map { job -> AnyPublisher < Void , Error > in
Deferred {
Future < Void , Error > { resolver in
// N o t e : I n t h e b a c k g r o u n d w e j u s t w a n t j o b s t o f a i l s i l e n t l y
ConfigMessageReceiveJob . run (
job ,
queue : pollerQueue ,
success : { _ , _ , _ in resolver ( Result . success ( ( ) ) ) } ,
failure : { _ , _ , _ , _ in resolver ( Result . success ( ( ) ) ) } ,
deferred : { _ , _ in resolver ( Result . success ( ( ) ) ) } ,
using : dependencies
)
}
}
. eraseToAnyPublisher ( )
}
)
. collect ( )
. flatMap { _ in
Publishers
. MergeMany (
standardMessageJobsToRun . map { job -> AnyPublisher < Void , Error > in
Deferred {
Future < Void , Error > { resolver in
// N o t e : I n t h e b a c k g r o u n d w e j u s t w a n t j o b s t o f a i l s i l e n t l y
MessageReceiveJob . run (
job ,
queue : pollerQueue ,
success : { _ , _ , _ in resolver ( Result . success ( ( ) ) ) } ,
failure : { _ , _ , _ , _ in resolver ( Result . success ( ( ) ) ) } ,
deferred : { _ , _ in resolver ( Result . success ( ( ) ) ) } ,
using : dependencies
)
}
}
. eraseToAnyPublisher ( )
}
)
. collect ( )
}
. map { _ in ( processedMessages , rawMessageCount , messageCount , hadValidHashUpdate ) }
. eraseToAnyPublisher ( )
}
. eraseToAnyPublisher ( )
}
}