chore: merged what can be between user and group sync job

pull/2873/head
Audric Ackermann 2 years ago
parent d9300e67a0
commit c14276200e

@@ -1,5 +1,10 @@
 import { GroupPubkeyType, PubkeyType } from 'libsession_util_nodejs';
-import { SnodeNamespaces, SnodeNamespacesGroup } from './namespaces';
+import {
+  SnodeNamespaces,
+  SnodeNamespacesGroup,
+  SnodeNamespacesGroupConfig,
+  UserConfigNamespaces,
+} from './namespaces';

 export type SwarmForSubRequest = { method: 'get_swarm'; params: { pubkey: string } };
@@ -108,14 +113,24 @@ export type DeleteFromNodeWithTimestampParams = {
 } & DeleteSigParameters;
 export type DeleteByHashesFromNodeParams = { messages: Array<string> } & DeleteSigParameters;

-export type StoreOnNodeData = {
-  pubkey: GroupPubkeyType | PubkeyType;
+type StoreOnNodeShared = {
   networkTimestamp: number;
-  namespace: number;
   data: Uint8Array;
   ttl: number;
 };
+
+type StoreOnNodeGroupConfig = StoreOnNodeShared & {
+  pubkey: GroupPubkeyType;
+  namespace: SnodeNamespacesGroupConfig;
+};
+
+type StoreOnNodeUserConfig = StoreOnNodeShared & {
+  pubkey: PubkeyType;
+  namespace: UserConfigNamespaces;
+};
+
+export type StoreOnNodeData = StoreOnNodeGroupConfig | StoreOnNodeUserConfig;

 export type StoreOnNodeSubRequest = { method: 'store'; params: StoreOnNodeParams };
 export type NetworkTimeSubRequest = { method: 'info'; params: object };
@@ -179,7 +194,8 @@ export type SnodeApiSubRequests =
 // eslint-disable-next-line @typescript-eslint/array-type
 export type NonEmptyArray<T> = [T, ...T[]];

-export type NotEmptyArrayOfBatchResults = NonEmptyArray<{
+export type BatchResultEntry = {
   code: number;
   body: Record<string, any>;
-}>;
+};
+export type NotEmptyArrayOfBatchResults = NonEmptyArray<BatchResultEntry>;
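A note on the union introduced above: tying each pubkey flavor to its own namespace set means the compiler rejects a store request that pairs a user pubkey with a group config namespace. A minimal standalone sketch of the same pattern, with literal types standing in for the real PubkeyType, GroupPubkeyType and SnodeNamespaces values (none of these names are from the commit):

// Standalone sketch, not the real types: literal namespaces stand in for SnodeNamespaces.
type GroupPk = `03${string}`;
type UserPk = `05${string}`;
type Shared = { networkTimestamp: number; data: Uint8Array; ttl: number };
type GroupStore = Shared & { pubkey: GroupPk; namespace: 12 | 13 | 14 };
type UserStore = Shared & { pubkey: UserPk; namespace: 2 | 3 | 4 | 5 };
type Store = GroupStore | UserStore;

// fine: a group pubkey with a group config namespace
const ok: Store = {
  pubkey: '03aa',
  namespace: 13,
  networkTimestamp: Date.now(),
  data: new Uint8Array(),
  ttl: 1000, // arbitrary value for the sketch
};

// @ts-expect-error a user (05) pubkey cannot be paired with a group config namespace
const rejected: Store = {
  pubkey: '05aa',
  namespace: 13,
  networkTimestamp: Date.now(),
  data: new Uint8Array(),
  ttl: 1000,
};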

@@ -57,7 +57,7 @@ export type SnodeNamespacesLegacyGroup = PickEnum<
   SnodeNamespaces.LegacyClosedGroup
 >;

-type SnodeNamespacesGroupConfig = PickEnum<
+export type SnodeNamespacesGroupConfig = PickEnum<
   SnodeNamespaces,
   | SnodeNamespaces.ClosedGroupInfo
   | SnodeNamespaces.ClosedGroupMembers
@@ -71,10 +71,7 @@ export type SnodeNamespacesGroup =
   | SnodeNamespacesGroupConfig
   | PickEnum<SnodeNamespaces, SnodeNamespaces.ClosedGroupMessages>;

-export type SnodeNamespacesUser = PickEnum<
-  SnodeNamespaces,
-  SnodeNamespaces.UserContacts | SnodeNamespaces.UserProfile | SnodeNamespaces.Default
->;
+export type SnodeNamespacesUser = PickEnum<SnodeNamespaces, SnodeNamespaces.Default>;

 export type UserConfigNamespaces = PickEnum<
   SnodeNamespaces,

@@ -45,7 +45,7 @@ async function handleGroupSharedConfigMessages(
     // do the merge with our current state
     await MetaGroupWrapperActions.metaMerge(groupPk, toMerge);
     // save updated dumps to the DB right away
-    await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
+    await LibSessionUtil.saveDumpsToDb(groupPk);
     // refresh the redux slice with the merged result
     window.inboxStore.dispatch(

@@ -1,26 +1,19 @@
 /* eslint-disable no-await-in-loop */
 import { PubkeyType } from 'libsession_util_nodejs';
-import { isArray, isEmpty, isNumber, isString } from 'lodash';
+import { isArray, isEmpty, isNumber } from 'lodash';
 import { v4 } from 'uuid';
 import { UserUtils } from '../..';
 import { ConfigDumpData } from '../../../../data/configDump/configDump';
 import { ConfigurationSyncJobDone } from '../../../../shims/events';
 import { isSignInByLinking } from '../../../../util/storage';
 import { GenericWrapperActions } from '../../../../webworker/workers/browser/libsession_worker_interface';
-import {
-  NotEmptyArrayOfBatchResults,
-  StoreOnNodeData,
-} from '../../../apis/snode_api/SnodeRequestTypes';
+import { StoreOnNodeData } from '../../../apis/snode_api/SnodeRequestTypes';
 import { GetNetworkTime } from '../../../apis/snode_api/getNetworkTime';
 import { TTL_DEFAULT } from '../../../constants';
 import { ConvoHub } from '../../../conversations';
 import { MessageSender } from '../../../sending/MessageSender';
 import { allowOnlyOneAtATime } from '../../Promise';
-import {
-  LibSessionUtil,
-  PendingChangesForUs,
-  UserSingleDestinationChanges,
-} from '../../libsession/libsession_utils';
+import { LibSessionUtil, UserSuccessfulChange } from '../../libsession/libsession_utils';
 import { runners } from '../JobRunner';
 import {
   AddJobCheckReturn,
@@ -38,78 +31,18 @@ const defaultMaxAttempts = 2;
 */
let lastRunConfigSyncJobTimestamp: number | null = null;

-type UserSuccessfulChange = {
-  pushed: PendingChangesForUs;
-  updatedHash: string;
-};
-
-/**
- * This function is run once we get the results from the multiple batch-send.
- */
-function resultsToSuccessfulChange(
-  result: NotEmptyArrayOfBatchResults | null,
-  request: UserSingleDestinationChanges
-): Array<UserSuccessfulChange> {
-  const successfulChanges: Array<UserSuccessfulChange> = [];
-
-  /**
-   * For each batch request, we get as result
-   * - status code + hash of the new config message
-   * - status code of the delete of all messages as given by the request hashes.
-   *
-   * As it is a sequence, the delete might have failed but the new config message might still be posted.
-   * So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
-   */
-  if (!result?.length) {
-    return successfulChanges;
-  }
-
-  for (let j = 0; j < result.length; j++) {
-    const batchResult = result[j];
-    const messagePostedHashes = batchResult?.body?.hash;
-
-    if (batchResult.code === 200 && isString(messagePostedHashes) && request.messages?.[j]) {
-      // the library keeps track of the hashes to push and pushed using the hashes now
-      successfulChanges.push({
-        updatedHash: messagePostedHashes,
-        pushed: request.messages?.[j],
-      });
-    }
-  }
-
-  return successfulChanges;
-}
-
-async function buildAndSaveDumpsToDB(
+async function confirmPushedAndDump(
   changes: Array<UserSuccessfulChange>,
   us: string
 ): Promise<void> {
   for (let i = 0; i < changes.length; i++) {
     const change = changes[i];
     const variant = LibSessionUtil.userNamespaceToVariant(change.pushed.namespace);
-
-    const needsDump = await LibSessionUtil.markAsPushed(
+    await GenericWrapperActions.confirmPushed(
       variant,
       change.pushed.seqno.toNumber(),
       change.updatedHash
     );
-
-    if (!needsDump) {
-      continue;
-    }
-    const dump = await GenericWrapperActions.dump(variant);
-    await ConfigDumpData.saveConfigDump({
-      data: dump,
-      publicKey: us,
-      variant,
-    });
-  }
-}
-
-async function saveDumpsNeededToDB(us: string) {
-  for (let i = 0; i < LibSessionUtil.requiredUserVariants.length; i++) {
-    const variant = LibSessionUtil.requiredUserVariants[i];
     const needsDump = await GenericWrapperActions.needsDump(variant);
     if (!needsDump) {
@@ -139,16 +72,16 @@ async function pushChangesToUserSwarmIfNeeded() {
   }

   // save the dumps to DB even before trying to push them, so at least we have an up to date dumps in the DB in case of crash, no network etc
-  await saveDumpsNeededToDB(us);
-  const singleDestChanges = await LibSessionUtil.pendingChangesForUs();
+  await LibSessionUtil.saveDumpsToDb(us);
+  const changesToPush = await LibSessionUtil.pendingChangesForUs();

   // If there are no pending changes then the job can just complete (next time something
   // is updated we want to try and run immediately so don't scuedule another run in this case)
-  if (isEmpty(singleDestChanges?.messages)) {
+  if (isEmpty(changesToPush?.messages)) {
     triggerConfSyncJobDone();
     return RunJobResult.Success;
   }

-  const msgs: Array<StoreOnNodeData> = singleDestChanges.messages.map(item => {
+  const msgs: Array<StoreOnNodeData> = changesToPush.messages.map(item => {
     return {
       namespace: item.namespace,
       pubkey: us,
@@ -158,14 +91,10 @@
     };
   });

-  const result = await MessageSender.sendEncryptedDataToSnode(
-    msgs,
-    us,
-    singleDestChanges.allOldHashes
-  );
+  const result = await MessageSender.sendEncryptedDataToSnode(msgs, us, changesToPush.allOldHashes);

   const expectedReplyLength =
-    singleDestChanges.messages.length + (singleDestChanges.allOldHashes.size ? 1 : 0);
+    changesToPush.messages.length + (changesToPush.allOldHashes.size ? 1 : 0);

   // we do a sequence call here. If we do not have the right expected number of results, consider it a failure
   if (!isArray(result) || result.length !== expectedReplyLength) {
     window.log.info(
@@ -175,14 +104,14 @@
     return RunJobResult.RetryJobIfPossible;
   }

-  const changes = resultsToSuccessfulChange(result, singleDestChanges);
+  const changes = LibSessionUtil.batchResultsToUserSuccessfulChange(result, changesToPush);
   if (isEmpty(changes)) {
     return RunJobResult.RetryJobIfPossible;
   }
   // Now that we have the successful changes, we need to mark them as pushed and
   // generate any config dumps which need to be stored
-  await buildAndSaveDumpsToDB(changes, us);
+  await confirmPushedAndDump(changes, us);
   triggerConfSyncJobDone();
   return RunJobResult.Success;
 }
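The expectedReplyLength check above relies on the batch being a sequence: one store sub-request per config message, plus a single delete-by-hashes sub-request when old hashes exist, so the reply count must match exactly. A hedged sketch of that accounting, with illustrative names only:

// Illustrative only: mirrors the reply-length arithmetic used by both sync jobs.
function expectedReplyLength(messageCount: number, oldHashesCount: number): number {
  // one result per stored config message, plus one result for the single
  // batched delete of all old hashes (only present when there is something to delete)
  return messageCount + (oldHashesCount > 0 ? 1 : 0);
}

console.assert(expectedReplyLength(3, 7) === 4); // 3 stores + 1 delete
console.assert(expectedReplyLength(2, 0) === 2); // no delete sub-request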

@@ -1,13 +1,11 @@
 /* eslint-disable no-await-in-loop */
 import { GroupPubkeyType } from 'libsession_util_nodejs';
-import { isArray, isEmpty, isNumber, isString } from 'lodash';
+import { isArray, isEmpty, isNumber } from 'lodash';
 import { UserUtils } from '../..';
+import { assertUnreachable } from '../../../../types/sqlSharedTypes';
 import { isSignInByLinking } from '../../../../util/storage';
 import { MetaGroupWrapperActions } from '../../../../webworker/workers/browser/libsession_worker_interface';
-import {
-  NotEmptyArrayOfBatchResults,
-  StoreOnNodeData,
-} from '../../../apis/snode_api/SnodeRequestTypes';
+import { StoreOnNodeData } from '../../../apis/snode_api/SnodeRequestTypes';
 import { GetNetworkTime } from '../../../apis/snode_api/getNetworkTime';
 import { SnodeNamespaces } from '../../../apis/snode_api/namespaces';
 import { TTL_DEFAULT } from '../../../constants';
@@ -15,11 +13,7 @@ import { ConvoHub } from '../../../conversations';
 import { MessageSender } from '../../../sending/MessageSender';
 import { PubKey } from '../../../types';
 import { allowOnlyOneAtATime } from '../../Promise';
-import {
-  GroupSingleDestinationChanges,
-  LibSessionUtil,
-  PendingChangesForGroup,
-} from '../../libsession/libsession_utils';
+import { GroupSuccessfulChange, LibSessionUtil } from '../../libsession/libsession_utils';
 import { runners } from '../JobRunner';
 import {
   AddJobCheckReturn,
@@ -27,7 +21,6 @@
   PersistedJob,
   RunJobResult,
 } from '../PersistedJob';
-import { assertUnreachable } from '../../../../types/sqlSharedTypes';

 const defaultMsBetweenRetries = 15000; // a long time between retries, to avoid running multiple jobs at the same time, when one was postponed at the same time as one already planned (5s)
 const defaultMaxAttempts = 2;
@@ -38,50 +31,7 @@ const defaultMaxAttempts = 2;
 */
 const lastRunConfigSyncJobTimestamps = new Map<string, number | null>();

-export type GroupSuccessfulChange = {
-  pushed: PendingChangesForGroup;
-  updatedHash: string;
-};
-
-/**
- * This function is run once we get the results from the multiple batch-send.
- */
-function resultsToSuccessfulChange(
-  result: NotEmptyArrayOfBatchResults | null,
-  request: GroupSingleDestinationChanges
-): Array<GroupSuccessfulChange> {
-  const successfulChanges: Array<GroupSuccessfulChange> = [];
-
-  /**
-   * For each batch request, we get as result
-   * - status code + hash of the new config message
-   * - status code of the delete of all messages as given by the request hashes.
-   *
-   * As it is a sequence, the delete might have failed but the new config message might still be posted.
-   * So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
-   */
-  if (!result?.length) {
-    return successfulChanges;
-  }
-
-  for (let j = 0; j < result.length; j++) {
-    const batchResult = result[j];
-    const messagePostedHashes = batchResult?.body?.hash;
-
-    if (batchResult.code === 200 && isString(messagePostedHashes) && request.messages?.[j].data) {
-      // libsession keeps track of the hashes to push and pushed using the hashes now
-      successfulChanges.push({
-        updatedHash: messagePostedHashes,
-        pushed: request.messages?.[j],
-      });
-    }
-  }
-
-  return successfulChanges;
-}
-
-async function buildAndSaveDumpsToDB(
+async function confirmPushedAndDump(
   changes: Array<GroupSuccessfulChange>,
   groupPk: GroupPubkeyType
 ): Promise<void> {
@@ -112,37 +62,37 @@ async function buildAndSaveDumpsToDB(
   }

   await MetaGroupWrapperActions.metaConfirmPushed(...toConfirm);
-  return LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
+  return LibSessionUtil.saveDumpsToDb(groupPk);
 }

 async function pushChangesToGroupSwarmIfNeeded(groupPk: GroupPubkeyType): Promise<RunJobResult> {
   // save the dumps to DB even before trying to push them, so at least we have an up to date dumps in the DB in case of crash, no network etc
-  await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
-  const singleDestChanges = await LibSessionUtil.pendingChangesForGroup(groupPk);
+  await LibSessionUtil.saveDumpsToDb(groupPk);
+  const changesToPush = await LibSessionUtil.pendingChangesForGroup(groupPk);

   // If there are no pending changes then the job can just complete (next time something
   // is updated we want to try and run immediately so don't scuedule another run in this case)
-  if (isEmpty(singleDestChanges?.messages)) {
+  if (isEmpty(changesToPush?.messages)) {
     return RunJobResult.Success;
   }

-  const msgs: Array<StoreOnNodeData> = singleDestChanges.messages.map(item => {
+  const msgs: Array<StoreOnNodeData> = changesToPush.messages.map(item => {
     return {
       namespace: item.namespace,
       pubkey: groupPk,
       networkTimestamp: GetNetworkTime.getNowWithNetworkOffset(),
       ttl: TTL_DEFAULT.TTL_CONFIG,
-      data: item.data,
+      data: item.ciphertext,
     };
   });

   const result = await MessageSender.sendEncryptedDataToSnode(
     msgs,
     groupPk,
-    singleDestChanges.allOldHashes
+    changesToPush.allOldHashes
   );

   const expectedReplyLength =
-    singleDestChanges.messages.length + (singleDestChanges.allOldHashes.size ? 1 : 0);
+    changesToPush.messages.length + (changesToPush.allOldHashes.size ? 1 : 0);

   // we do a sequence call here. If we do not have the right expected number of results, consider it a failure
   if (!isArray(result) || result.length !== expectedReplyLength) {
@@ -154,14 +104,14 @@ async function pushChangesToGroupSwarmIfNeeded(groupPk: GroupPubkeyType): Promis
     return RunJobResult.RetryJobIfPossible;
   }

-  const changes = GroupSync.resultsToSuccessfulChange(result, singleDestChanges);
+  const changes = LibSessionUtil.batchResultsToGroupSuccessfulChange(result, changesToPush);
   if (isEmpty(changes)) {
     return RunJobResult.RetryJobIfPossible;
   }
   // Now that we have the successful changes, we need to mark them as pushed and
   // generate any config dumps which need to be stored
-  await buildAndSaveDumpsToDB(changes, groupPk);
+  await confirmPushedAndDump(changes, groupPk);
   return RunJobResult.Success;
 }
@@ -283,7 +233,6 @@ async function queueNewJobIfNeeded(groupPk: GroupPubkeyType) {
 export const GroupSync = {
   GroupSyncJob,
   pushChangesToGroupSwarmIfNeeded,
-  resultsToSuccessfulChange,
   queueNewJobIfNeeded: (groupPk: GroupPubkeyType) =>
     allowOnlyOneAtATime(`GroupSyncJob-oneAtAtTime-${groupPk}`, () => queueNewJobIfNeeded(groupPk)),
 };
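Read together with the user job above, both pushChangesTo*SwarmIfNeeded functions now follow the same outline, which is what this commit merges. A condensed, illustrative sketch of that shared shape (the helper name and parameter shapes below are ours, not the commit's):

// Illustrative outline of the flow both jobs now share; not the actual code.
async function pushIfNeeded<TMsg, TChange>(deps: {
  saveDumps: () => Promise<void>;
  pending: () => Promise<{ messages: Array<TMsg>; allOldHashes: Set<string> }>;
  send: (msgs: Array<TMsg>, oldHashes: Set<string>) => Promise<unknown>;
  toSuccessfulChanges: (
    result: unknown,
    req: { messages: Array<TMsg>; allOldHashes: Set<string> }
  ) => Array<TChange>;
  confirmPushedAndDump: (changes: Array<TChange>) => Promise<void>;
}): Promise<'Success' | 'RetryJobIfPossible'> {
  await deps.saveDumps(); // dump first, so a crash or network failure cannot lose local state
  const req = await deps.pending();
  if (!req.messages.length) {
    return 'Success';
  }
  const result = await deps.send(req.messages, req.allOldHashes);
  // one reply per store, plus one for the batched delete when old hashes exist
  const expected = req.messages.length + (req.allOldHashes.size ? 1 : 0);
  if (!Array.isArray(result) || result.length !== expected) {
    return 'RetryJobIfPossible';
  }
  const changes = deps.toSuccessfulChanges(result, req);
  if (!changes.length) {
    return 'RetryJobIfPossible';
  }
  await deps.confirmPushedAndDump(changes);
  return 'Success';
}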

@@ -1,8 +1,8 @@
 /* eslint-disable no-await-in-loop */
 /* eslint-disable import/extensions */
 /* eslint-disable import/no-unresolved */
-import { GroupPubkeyType } from 'libsession_util_nodejs';
-import { compact, difference, omit } from 'lodash';
+import { GroupPubkeyType, PubkeyType } from 'libsession_util_nodejs';
+import { compact, difference, isString, omit } from 'lodash';
 import Long from 'long';
 import { UserUtils } from '..';
 import { ConfigDumpData } from '../../../data/configDump/configDump';
@@ -20,6 +20,10 @@ import { SnodeNamespaces, UserConfigNamespaces } from '../../apis/snode_api/name
 import { ed25519Str } from '../../onions/onionPath';
 import { PubKey } from '../../types';
 import { ConfigurationSync } from '../job_runners/jobs/ConfigurationSyncJob';
+import {
+  BatchResultEntry,
+  NotEmptyArrayOfBatchResults,
+} from '../../apis/snode_api/SnodeRequestTypes';

 const requiredUserVariants: Array<ConfigWrapperUser> = [
   'UserConfig',
@@ -95,88 +99,85 @@ async function initializeLibSessionUtilWrappers() {
   // No need to load the meta group wrapper here. We will load them once the SessionInbox is loaded with a redux action
 }

-export type PendingChangesForUs = {
+type PendingChangesShared = {
   ciphertext: Uint8Array;
+};
+
+export type PendingChangesForUs = PendingChangesShared & {
   seqno: Long;
   namespace: UserConfigNamespaces;
 };

-type PendingChangesForGroupNonKey = {
-  data: Uint8Array;
+type PendingChangesForGroupNonKey = PendingChangesShared & {
   seqno: Long;
   namespace: SnodeNamespaces.ClosedGroupInfo | SnodeNamespaces.ClosedGroupMembers;
   type: Extract<ConfigWrapperGroupDetailed, 'GroupInfo' | 'GroupMember'>;
 };

 type PendingChangesForGroupKey = {
-  data: Uint8Array;
+  ciphertext: Uint8Array;
   namespace: SnodeNamespaces.ClosedGroupKeys;
   type: Extract<ConfigWrapperGroupDetailed, 'GroupKeys'>;
 };

 export type PendingChangesForGroup = PendingChangesForGroupNonKey | PendingChangesForGroupKey;

-type SingleDestinationChanges<T extends PendingChangesForGroup | PendingChangesForUs> = {
+type DestinationChanges<T extends PendingChangesForGroup | PendingChangesForUs> = {
   messages: Array<T>;
   allOldHashes: Set<string>;
 };

-export type UserSingleDestinationChanges = SingleDestinationChanges<PendingChangesForUs>;
-export type GroupSingleDestinationChanges = SingleDestinationChanges<PendingChangesForGroup>;
+export type UserDestinationChanges = DestinationChanges<PendingChangesForUs>;
+export type GroupDestinationChanges = DestinationChanges<PendingChangesForGroup>;

-async function pendingChangesForUs(): Promise<UserSingleDestinationChanges> {
-  const us = UserUtils.getOurPubKeyStrFromCache();
-  const dumps = await ConfigDumpData.getAllDumpsWithoutDataFor(us);
+export type UserSuccessfulChange = {
+  pushed: PendingChangesForUs;
+  updatedHash: string;
+};

-  // Ensure we always check the required user config types for changes even if there is no dump
-  // data yet (to deal with first launch cases)
-  LibSessionUtil.requiredUserVariants.forEach(requiredVariant => {
-    if (!dumps.some(m => m.publicKey === us && m.variant === requiredVariant)) {
-      dumps.push({
-        publicKey: us,
-        variant: requiredVariant,
-      });
-    }
-  });
+export type GroupSuccessfulChange = {
+  pushed: PendingChangesForGroup;
+  updatedHash: string;
+};

-  const results: UserSingleDestinationChanges = { messages: [], allOldHashes: new Set() };
+/**
+ * Fetch what needs to be pushed for all of the current user's wrappers.
+ */
+async function pendingChangesForUs(): Promise<UserDestinationChanges> {
+  const results: UserDestinationChanges = { messages: [], allOldHashes: new Set() };
   const variantsNeedingPush = new Set<ConfigWrapperUser>();
+  const userVariants = LibSessionUtil.requiredUserVariants;
+  for (let index = 0; index < userVariants.length; index++) {
+    const variant = userVariants[index];

-  for (let index = 0; index < dumps.length; index++) {
-    const dump = dumps[index];
-    const variant = dump.variant;
-    if (!isUserConfigWrapperType(variant)) {
-      // this shouldn't happen for our pubkey.
-      continue;
-    }
     const needsPush = await GenericWrapperActions.needsPush(variant);
     if (!needsPush) {
       continue;
     }

-    variantsNeedingPush.add(variant);
     const { data, seqno, hashes, namespace } = await GenericWrapperActions.push(variant);
+    variantsNeedingPush.add(variant);

     results.messages.push({
       ciphertext: data,
       seqno: Long.fromNumber(seqno),
-      namespace,
+      namespace, // we only use the namespace to know to wha
     });

-    hashes.forEach(hash => {
-      results.allOldHashes.add(hash);
-    });
+    hashes.forEach(results.allOldHashes.add); // add all the hashes to the set
   }
-  window.log.info(`those variants needs push: "${[...variantsNeedingPush]}"`);
+  window.log.info(`those user variants needs push: "${[...variantsNeedingPush]}"`);

   return results;
 }

-// we link the namespace to the type of what each wrapper needs
-
-async function pendingChangesForGroup(
-  groupPk: GroupPubkeyType
-): Promise<GroupSingleDestinationChanges> {
+/**
+ * Fetch what needs to be pushed for the specified group public key.
+ * @param groupPk the public key of the group to fetch the details off
+ * @returns an object with a list of messages to be pushed and the list of hashes to bump expiry, server side
+ */
+async function pendingChangesForGroup(groupPk: GroupPubkeyType): Promise<GroupDestinationChanges> {
   if (!PubKey.isClosedGroupV2(groupPk)) {
     throw new Error(`pendingChangesForGroup only works for user or 03 group pubkeys`);
   }
@@ -195,7 +196,7 @@ async function pendingChangesForGroup(
   if (groupKeys) {
     results.push({
       type: 'GroupKeys',
-      data: groupKeys.data,
+      ciphertext: groupKeys.data,
       namespace: groupKeys.namespace,
     });
   }
@@ -203,7 +204,7 @@
   if (groupInfo) {
     results.push({
       type: 'GroupInfo',
-      data: groupInfo.data,
+      ciphertext: groupInfo.data,
       seqno: Long.fromNumber(groupInfo.seqno),
       namespace: groupInfo.namespace,
     });
@@ -211,7 +212,7 @@
   if (groupMember) {
     results.push({
       type: 'GroupMember',
-      data: groupMember.data,
+      ciphertext: groupMember.data,
       seqno: Long.fromNumber(groupMember.seqno),
       namespace: groupMember.namespace,
     });
@@ -227,7 +228,12 @@
   return { messages: results, allOldHashes };
 }

+/**
+ * Return the wrapperId associated with a specific namespace.
+ * WrapperIds are what we use in the database and with the libsession workers calls, and namespace is what we push to.
+ */
 function userNamespaceToVariant(namespace: UserConfigNamespaces) {
+  // TODO Might be worth migrating them to use directly the namespaces?
   switch (namespace) {
     case SnodeNamespaces.UserProfile:
       return 'UserConfig';
@@ -239,34 +245,141 @@ function userNamespaceToVariant(namespace: UserConfigNamespaces) {
       return 'ConvoInfoVolatileConfig';
     default:
       assertUnreachable(namespace, `userNamespaceToVariant: Unsupported namespace: "${namespace}"`);
-      throw new Error('userNamespaceToVariant: Unsupported namespace:');
+      throw new Error('userNamespaceToVariant: Unsupported namespace:'); // ts is not happy without this
   }
 }

+function resultShouldBeIncluded<T extends PendingChangesForGroup | PendingChangesForUs>(
+  msgPushed: T,
+  batchResult: BatchResultEntry
+) {
+  const hash = batchResult.body?.hash;
+  if (batchResult.code === 200 && isString(hash) && msgPushed.ciphertext) {
+    return {
+      hash,
+      pushed: msgPushed,
+    };
+  }
+  return null;
+}
+
 /**
- * Returns true if the config needs to be dumped afterwards
+ * This function is run once we get the results from the multiple batch-send for the group push.
+ * Note: the logic is the same as `batchResultsToUserSuccessfulChange` but I couldn't make typescript happy.
  */
-async function markAsPushed(variant: ConfigWrapperUser, seqno: number, hash: string) {
-  await GenericWrapperActions.confirmPushed(variant, seqno, hash);
-  return GenericWrapperActions.needsDump(variant);
+function batchResultsToGroupSuccessfulChange(
+  result: NotEmptyArrayOfBatchResults | null,
+  request: GroupDestinationChanges
+): Array<GroupSuccessfulChange> {
+  const successfulChanges: Array<GroupSuccessfulChange> = [];
+
+  /**
+   * For each batch request, we get as result
+   * - status code + hash of the new config message
+   * - status code of the delete of all messages as given by the request hashes.
+   *
+   * As it is a sequence, the delete might have failed but the new config message might still be posted.
+   * So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
+   */
+  if (!result?.length) {
+    return successfulChanges;
+  }
+
+  for (let j = 0; j < result.length; j++) {
+    const msgPushed = request.messages?.[j];
+    const shouldBe = resultShouldBeIncluded(msgPushed, result[j]);
+    if (shouldBe) {
+      // libsession keeps track of the hashes to push and the one pushed
+      successfulChanges.push({
+        updatedHash: shouldBe.hash,
+        pushed: shouldBe.pushed,
+      });
+    }
+  }
+
+  return successfulChanges;
 }

 /**
- * If a dump is needed for that metagroup wrapper, dump it to the Database
+ * This function is run once we get the results from the multiple batch-send for the user push.
+ * Note: the logic is the same as `batchResultsToGroupSuccessfulChange` but I couldn't make typescript happy.
  */
-async function saveMetaGroupDumpToDb(groupPk: GroupPubkeyType) {
-  const metaNeedsDump = await MetaGroupWrapperActions.needsDump(groupPk);
-  // save the concatenated dumps as a single entry in the DB if any of the dumps had a need for dump
-  if (metaNeedsDump) {
-    const dump = await MetaGroupWrapperActions.metaDump(groupPk);
+function batchResultsToUserSuccessfulChange(
+  result: NotEmptyArrayOfBatchResults | null,
+  request: UserDestinationChanges
+): Array<UserSuccessfulChange> {
+  const successfulChanges: Array<UserSuccessfulChange> = [];
+
+  /**
+   * For each batch request, we get as result
+   * - status code + hash of the new config message
+   * - status code of the delete of all messages as given by the request hashes.
+   *
+   * As it is a sequence, the delete might have failed but the new config message might still be posted.
+   * So we need to check which request failed, and if it is the delete by hashes, we need to add the hash of the posted message to the list of hashes
+   */
+  if (!result?.length) {
+    return successfulChanges;
+  }
+
+  for (let j = 0; j < result.length; j++) {
+    const msgPushed = request.messages?.[j];
+    const shouldBe = resultShouldBeIncluded(msgPushed, result[j]);
+    if (shouldBe) {
+      // libsession keeps track of the hashes to push and the one pushed
+      successfulChanges.push({
+        updatedHash: shouldBe.hash,
+        pushed: shouldBe.pushed,
+      });
+    }
+  }
+
+  return successfulChanges;
+}
+
+/**
+ * Check if the wrappers related to that pubkeys need to be dumped to the DB, and if yes, do it.
+ */
+async function saveDumpsToDb(pubkey: PubkeyType | GroupPubkeyType) {
+  // first check if this is relating a group
+  if (PubKey.isClosedGroupV2(pubkey)) {
+    const metaNeedsDump = await MetaGroupWrapperActions.needsDump(pubkey);
+    // save the concatenated dumps as a single entry in the DB if any of the dumps had a need for dump
+    if (metaNeedsDump) {
+      const dump = await MetaGroupWrapperActions.metaDump(pubkey);
+      await ConfigDumpData.saveConfigDump({
+        data: dump,
+        publicKey: pubkey,
+        variant: `MetaGroupConfig-${pubkey}`,
+      });
+      window.log.debug(`Saved dumps for metagroup ${ed25519Str(pubkey)}`);
+    } else {
+      window.log.debug(`No need to update local dumps for metagroup ${ed25519Str(pubkey)}`);
+    }
+    return;
+  }
+  // here, we can only be called with our current user pubkey
+  if (pubkey !== UserUtils.getOurPubKeyStrFromCache()) {
+    throw new Error('saveDumpsToDb only supports groupv2 and us pubkeys');
+  }
+  for (let i = 0; i < LibSessionUtil.requiredUserVariants.length; i++) {
+    const variant = LibSessionUtil.requiredUserVariants[i];
+    const needsDump = await GenericWrapperActions.needsDump(variant);
+    if (!needsDump) {
+      continue;
+    }
+    const dump = await GenericWrapperActions.dump(variant);
     await ConfigDumpData.saveConfigDump({
       data: dump,
-      publicKey: groupPk,
-      variant: `MetaGroupConfig-${groupPk}`,
+      publicKey: pubkey,
+      variant,
     });
-    window.log.debug(`Saved dumps for metagroup ${ed25519Str(groupPk)}`);
-  } else {
-    window.log.debug(`No need to update local dumps for metagroup ${ed25519Str(groupPk)}`);
   }
 }
@@ -276,6 +389,7 @@ export const LibSessionUtil = {
   requiredUserVariants,
   pendingChangesForUs,
   pendingChangesForGroup,
-  markAsPushed,
-  saveMetaGroupDumpToDb,
+  saveDumpsToDb,
+  batchResultsToGroupSuccessfulChange,
+  batchResultsToUserSuccessfulChange,
 };
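The two batchResultsTo*SuccessfulChange helpers above are intentionally identical apart from their types, with resultShouldBeIncluded carrying the shared filter. A small self-contained run of that filter logic, using stand-in types rather than the real imports:

// Stand-in types; the real ones are BatchResultEntry and PendingChanges* above.
type ResultEntry = { code: number; body: Record<string, any> };
type Pushed = { ciphertext: Uint8Array };

function include(pushed: Pushed | undefined, result: ResultEntry) {
  const hash = result.body?.hash;
  // only a 200 carrying a string hash, for a message that had a payload, counts
  return result.code === 200 && typeof hash === 'string' && pushed?.ciphertext
    ? { hash, pushed }
    : null;
}

const messages: Array<Pushed> = [
  { ciphertext: new Uint8Array([1]) },
  { ciphertext: new Uint8Array([2]) },
];
const results: Array<ResultEntry> = [
  { code: 200, body: { hash: 'hash1' } },
  { code: 401, body: { hash: 'hash2' } }, // failed sub-request: dropped by the filter
];
const successful = results
  .map((res, i) => include(messages[i], res))
  .filter(change => change !== null);
// => [{ hash: 'hash1', pushed: messages[0] }]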

@@ -236,27 +236,22 @@ describe('JobRunner', () => {
       expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(job.persistedData.identifier);

       clock.tick(5000);
-      console.info('=========== awaiting first job ==========');

       await runnerMulti.waitCurrentJob();
       // just give some time for the runnerMulti to pick up a new job
       await sleepFor(10);

       expect(runnerMulti.getJobList()).to.deep.eq([]);
       expect(runnerMulti.getCurrentJobIdentifier()).to.be.equal(null);
-      console.info('=========== awaited first job ==========');

       // the first job should already be finished now
       result = await runnerMulti.addJob(job2);
       expect(result).to.eq('job_started');
       expect(runnerMulti.getJobList()).to.deep.eq([job2.serializeJob()]);
-      console.info('=========== awaiting second job ==========');

       // each job takes 5s to finish, so let's tick once the first one should be done
       clock.tick(5010);
       await runnerMulti.waitCurrentJob();
       await sleepFor(10);
-      console.info('=========== awaited second job ==========');

       expect(runnerMulti.getJobList()).to.deep.eq([]);
     });

@@ -1,6 +1,6 @@
 import { expect } from 'chai';
 import { GroupPubkeyType } from 'libsession_util_nodejs';
-import { omit, pick } from 'lodash';
+import { omit } from 'lodash';
 import Long from 'long';
 import Sinon from 'sinon';
 import { ConfigDumpData } from '../../../../../../data/configDump/configDump';
@@ -8,29 +8,27 @@ import { getSodiumNode } from '../../../../../../node/sodiumNode';
 import { NotEmptyArrayOfBatchResults } from '../../../../../../session/apis/snode_api/SnodeRequestTypes';
 import { GetNetworkTime } from '../../../../../../session/apis/snode_api/getNetworkTime';
 import { SnodeNamespaces } from '../../../../../../session/apis/snode_api/namespaces';
-import { TTL_DEFAULT } from '../../../../../../session/constants';
 import { ConvoHub } from '../../../../../../session/conversations';
 import { LibSodiumWrappers } from '../../../../../../session/crypto';
-import { MessageSender } from '../../../../../../session/sending';
 import { UserUtils } from '../../../../../../session/utils';
 import { RunJobResult } from '../../../../../../session/utils/job_runners/PersistedJob';
+import { GroupSync } from '../../../../../../session/utils/job_runners/jobs/GroupConfigJob';
 import {
+  GroupDestinationChanges,
   GroupSuccessfulChange,
-  GroupSync,
-} from '../../../../../../session/utils/job_runners/jobs/GroupConfigJob';
-import {
-  GroupSingleDestinationChanges,
   LibSessionUtil,
   PendingChangesForGroup,
 } from '../../../../../../session/utils/libsession/libsession_utils';
 import { MetaGroupWrapperActions } from '../../../../../../webworker/workers/browser/libsession_worker_interface';
 import { TestUtils } from '../../../../../test-utils';
+import { MessageSender } from '../../../../../../session/sending';
 import { TypedStub } from '../../../../../test-utils/utils';
+import { TTL_DEFAULT } from '../../../../../../session/constants';

 function validInfo(sodium: LibSodiumWrappers) {
   return {
     type: 'GroupInfo',
-    data: sodium.randombytes_buf(12),
+    ciphertext: sodium.randombytes_buf(12),
     seqno: Long.fromNumber(123),
     namespace: SnodeNamespaces.ClosedGroupInfo,
     timestamp: 1234,
@@ -39,7 +37,7 @@ function validInfo(sodium: LibSodiumWrappers) {
 function validMembers(sodium: LibSodiumWrappers) {
   return {
     type: 'GroupMember',
-    data: sodium.randombytes_buf(12),
+    ciphertext: sodium.randombytes_buf(12),
     seqno: Long.fromNumber(321),
     namespace: SnodeNamespaces.ClosedGroupMembers,
     timestamp: 4321,
@@ -49,13 +47,13 @@
 function validKeys(sodium: LibSodiumWrappers) {
   return {
     type: 'GroupKeys',
-    data: sodium.randombytes_buf(12),
+    ciphertext: sodium.randombytes_buf(12),
     namespace: SnodeNamespaces.ClosedGroupKeys,
     timestamp: 3333,
   } as const;
 }

-describe('GroupSyncJob saveMetaGroupDumpToDb', () => {
+describe('GroupSyncJob saveDumpsToDb', () => {
   let groupPk: GroupPubkeyType;

   beforeEach(async () => {});
@@ -71,7 +69,7 @@ describe('GroupSyncJob saveMetaGroupDumpToDb', () => {
     Sinon.stub(MetaGroupWrapperActions, 'needsDump').resolves(false);
     const metaDump = Sinon.stub(MetaGroupWrapperActions, 'metaDump').resolves(new Uint8Array());
     const saveConfigDump = Sinon.stub(ConfigDumpData, 'saveConfigDump').resolves();
-    await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
+    await LibSessionUtil.saveDumpsToDb(groupPk);
     expect(saveConfigDump.callCount).to.be.equal(0);
     expect(metaDump.callCount).to.be.equal(0);
   });
@@ -81,7 +79,7 @@ describe('GroupSyncJob saveMetaGroupDumpToDb', () => {
     const dump = [1, 2, 3, 4, 5];
     const metaDump = Sinon.stub(MetaGroupWrapperActions, 'metaDump').resolves(new Uint8Array(dump));
     const saveConfigDump = Sinon.stub(ConfigDumpData, 'saveConfigDump').resolves();
-    await LibSessionUtil.saveMetaGroupDumpToDb(groupPk);
+    await LibSessionUtil.saveDumpsToDb(groupPk);
     expect(saveConfigDump.callCount).to.be.equal(1);
     expect(metaDump.callCount).to.be.equal(1);
     expect(metaDump.firstCall.args).to.be.deep.eq([groupPk]);
@@ -143,20 +141,20 @@ describe('GroupSyncJob pendingChangesForGroup', () => {
     // check for the keys push content
     expect(result.messages[0]).to.be.deep.eq({
       type: 'GroupKeys',
-      data: new Uint8Array([3, 2, 1]),
+      ciphertext: new Uint8Array([3, 2, 1]),
       namespace: 13,
     });
     // check for the info push content
     expect(result.messages[1]).to.be.deep.eq({
       type: 'GroupInfo',
-      data: new Uint8Array([1, 2, 3]),
+      ciphertext: new Uint8Array([1, 2, 3]),
       namespace: 12,
       seqno: Long.fromInt(pushResults.groupInfo.seqno),
     });
     // check for the members pusu content
     expect(result.messages[2]).to.be.deep.eq({
       type: 'GroupMember',
-      data: new Uint8Array([1, 2]),
+      ciphertext: new Uint8Array([1, 2]),
       namespace: 14,
       seqno: Long.fromInt(pushResults.groupMember.seqno),
     });
@@ -247,11 +245,14 @@ describe('GroupSyncJob resultsToSuccessfulChange', () => {
   });

   it('no or empty results return empty array', () => {
     expect(
-      GroupSync.resultsToSuccessfulChange(null, { allOldHashes: new Set(), messages: [] })
+      LibSessionUtil.batchResultsToGroupSuccessfulChange(null, {
+        allOldHashes: new Set(),
+        messages: [],
+      })
     ).to.be.deep.eq([]);

     expect(
-      GroupSync.resultsToSuccessfulChange([] as any as NotEmptyArrayOfBatchResults, {
+      LibSessionUtil.batchResultsToGroupSuccessfulChange([] as any as NotEmptyArrayOfBatchResults, {
         allOldHashes: new Set(),
         messages: [],
       })
@@ -262,11 +263,11 @@
     const member = validMembers(sodium);
     const info = validInfo(sodium);
     const batchResults: NotEmptyArrayOfBatchResults = [{ code: 200, body: { hash: 'hash1' } }];
-    const request: GroupSingleDestinationChanges = {
+    const request: GroupDestinationChanges = {
       allOldHashes: new Set(),
       messages: [info, member],
     };
-    const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results).to.be.deep.eq([
       {
         updatedHash: 'hash1',
@@ -282,11 +283,11 @@
       { code: 200, body: { hash: 'hash1' } },
       { code: 200, body: { hash: 'hash2' } },
     ];
-    const request: GroupSingleDestinationChanges = {
+    const request: GroupDestinationChanges = {
       allOldHashes: new Set(),
       messages: [info, member],
     };
-    const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results).to.be.deep.eq([
       {
         updatedHash: 'hash1',
@@ -306,11 +307,11 @@
       { code: 200, body: { hash: 123 as any as string } },
       { code: 200, body: { hash: 'hash2' } },
     ];
-    const request: GroupSingleDestinationChanges = {
+    const request: GroupDestinationChanges = {
       allOldHashes: new Set(),
       messages: [info, member],
     };
-    const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results).to.be.deep.eq([
       {
         updatedHash: 'hash2',
@@ -322,16 +323,16 @@
   it('skip request item without data', () => {
     const member = validMembers(sodium);
     const info = validInfo(sodium);
-    const infoNoData = omit(info, 'data');
+    const infoNoData = omit(info, 'ciphertext');
     const batchResults: NotEmptyArrayOfBatchResults = [
       { code: 200, body: { hash: 'hash1' } },
       { code: 200, body: { hash: 'hash2' } },
     ];
-    const request: GroupSingleDestinationChanges = {
+    const request: GroupDestinationChanges = {
       allOldHashes: new Set(),
       messages: [infoNoData as any as PendingChangesForGroup, member],
     };
-    const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results).to.be.deep.eq([
       {
         updatedHash: 'hash2',
@@ -347,11 +348,11 @@
       { code: 200, body: { hash: 'hash1' } },
       { code: 401, body: { hash: 'hash2' } },
     ];
-    const request: GroupSingleDestinationChanges = {
+    const request: GroupDestinationChanges = {
       allOldHashes: new Set(),
       messages: [info, member],
     };
-    const results = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results).to.be.deep.eq([
       {
         updatedHash: 'hash1',
@@ -362,7 +363,7 @@
     // another test swapping the results
     batchResults[0].code = 401;
     batchResults[1].code = 200;
-    const results2 = GroupSync.resultsToSuccessfulChange(batchResults, request);
+    const results2 = LibSessionUtil.batchResultsToGroupSuccessfulChange(batchResults, request);
     expect(results2).to.be.deep.eq([
       {
         updatedHash: 'hash2',
@@ -379,7 +380,7 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
   let sendStub: TypedStub<typeof MessageSender, 'sendEncryptedDataToSnode'>;
   let pendingChangesForGroupStub: TypedStub<typeof LibSessionUtil, 'pendingChangesForGroup'>;
-  let saveMetaGroupDumpToDbStub: TypedStub<typeof LibSessionUtil, 'saveMetaGroupDumpToDb'>;
+  let saveDumpsToDbStub: TypedStub<typeof LibSessionUtil, 'saveDumpsToDb'>;

   beforeEach(async () => {
     sodium = await getSodiumNode();
@@ -389,7 +390,7 @@
     Sinon.stub(UserUtils, 'getUserED25519KeyPairBytes').resolves(userkeys.ed25519KeyPair);

     pendingChangesForGroupStub = Sinon.stub(LibSessionUtil, 'pendingChangesForGroup');
-    saveMetaGroupDumpToDbStub = Sinon.stub(LibSessionUtil, 'saveMetaGroupDumpToDb');
+    saveDumpsToDbStub = Sinon.stub(LibSessionUtil, 'saveDumpsToDb');
     sendStub = Sinon.stub(MessageSender, 'sendEncryptedDataToSnode');
   });
   afterEach(() => {
@@ -402,8 +403,8 @@
     expect(result).to.be.eq(RunJobResult.Success);
     expect(sendStub.callCount).to.be.eq(0);
     expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
-    expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(1);
-    expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
+    expect(saveDumpsToDbStub.callCount).to.be.eq(1);
+    expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
   });

   it('calls sendEncryptedDataToSnode with the right data and retry if network returned nothing', async () => {
@@ -422,11 +423,18 @@
     expect(result).to.be.eq(RunJobResult.RetryJobIfPossible); // not returning anything in the sendstub so network issue happened
     expect(sendStub.callCount).to.be.eq(1);
     expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
-    expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(1);
-    expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
+    expect(saveDumpsToDbStub.callCount).to.be.eq(1);
+    expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);

     function expected(details: any) {
-      return { ...pick(details, 'data', 'namespace'), ttl, networkTimestamp, pubkey: groupPk };
+      console.warn('details', details);
+      return {
+        namespace: details.namespace,
+        data: details.ciphertext,
+        ttl,
+        networkTimestamp,
+        pubkey: groupPk,
+      };
     }

     const expectedInfo = expected(info);
@@ -438,7 +446,7 @@
     ]);
   });

-  it('calls sendEncryptedDataToSnode with the right data and retry if network returned nothing', async () => {
+  it('calls sendEncryptedDataToSnode with the right data (and keys) and retry if network returned nothing', async () => {
     const info = validInfo(sodium);
     const member = validMembers(sodium);
     const keys = validKeys(sodium);
@@ -460,7 +468,7 @@
         updatedHash: 'hash2',
       },
     ];
-    Sinon.stub(GroupSync, 'resultsToSuccessfulChange').returns(changes);
+    Sinon.stub(LibSessionUtil, 'batchResultsToGroupSuccessfulChange').returns(changes);
     const metaConfirmPushed = Sinon.stub(MetaGroupWrapperActions, 'metaConfirmPushed').resolves();

     sendStub.resolves([
@@ -473,9 +481,9 @@
     expect(sendStub.callCount).to.be.eq(1);
     expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
-    expect(saveMetaGroupDumpToDbStub.callCount).to.be.eq(2);
-    expect(saveMetaGroupDumpToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
-    expect(saveMetaGroupDumpToDbStub.secondCall.args).to.be.deep.eq([groupPk]);
+    expect(saveDumpsToDbStub.callCount).to.be.eq(2);
+    expect(saveDumpsToDbStub.firstCall.args).to.be.deep.eq([groupPk]);
+    expect(saveDumpsToDbStub.secondCall.args).to.be.deep.eq([groupPk]);
     expect(metaConfirmPushed.callCount).to.be.eq(1);
     expect(metaConfirmPushed.firstCall.args).to.be.deep.eq([
       groupPk,
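A side note on why the stub target changed from GroupSync to LibSessionUtil in these tests: Sinon replaces a property on the object it is given, so the stub is only observed by code that calls through that same object, which the job now does via LibSessionUtil.batchResultsToGroupSuccessfulChange. An illustrative reduction:

// Illustrative: why call sites must go through the stubbed object.
const lib = { helper: () => 'real' };
const callThroughObject = () => lib.helper();
const capturedRef = lib.helper; // direct reference taken before stubbing

// after Sinon.stub(lib, 'helper').returns('stubbed'):
//   callThroughObject() === 'stubbed'  -- sees the replaced property
//   capturedRef() === 'real'           -- bypasses the stub entirely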
