chore: fix unit tests with group chunk2

pull/2963/head
Audric Ackermann 1 year ago
parent b9da60af3b
commit a53299377a

@ -37,10 +37,7 @@ import { LeftPaneSectionContainer } from './LeftPaneSectionContainer';
import { SettingsKey } from '../../data/settings-key';
import { getLatestReleaseFromFileServer } from '../../session/apis/file_server_api/FileServerApi';
import {
forceRefreshRandomSnodePool,
getFreshSwarmFor,
} from '../../session/apis/snode_api/snodePool';
import { SnodePool } from '../../session/apis/snode_api/snodePool';
import { UserSync } from '../../session/utils/job_runners/jobs/UserSyncJob';
import { forceSyncConfigurationNowIfNeeded } from '../../session/utils/sync/syncUtils';
import { isDarkTheme } from '../../state/selectors/theme';
@ -210,7 +207,7 @@ const doAppStartUp = async () => {
void triggerSyncIfNeeded();
void getSwarmPollingInstance().start();
void loadDefaultRooms();
void getFreshSwarmFor(UserUtils.getOurPubKeyStrFromCache()); // refresh our swarm on start to speed up the first message fetching event
void SnodePool.getFreshSwarmFor(UserUtils.getOurPubKeyStrFromCache()); // refresh our swarm on start to speed up the first message fetching event
// TODOLATER make this a job of the JobRunner
debounce(triggerAvatarReUploadIfNeeded, 200);
@ -292,7 +289,7 @@ export const ActionsPanel = () => {
}
// trigger an updates from the snodes every hour
void forceRefreshRandomSnodePool();
void SnodePool.forceRefreshRandomSnodePool();
}, DURATION.HOURS * 1);
useTimeoutFn(() => {
@ -300,7 +297,7 @@ export const ActionsPanel = () => {
return;
}
// trigger an updates from the snodes after 5 minutes, once
void forceRefreshRandomSnodePool();
void SnodePool.forceRefreshRandomSnodePool();
}, DURATION.MINUTES * 5);
useInterval(() => {

@ -6,9 +6,9 @@ import { getSodiumRenderer } from '../../crypto';
import { ed25519Str } from '../../onions/onionPath';
import { StringUtils, UserUtils } from '../../utils';
import { fromBase64ToArray, fromHexToArray } from '../../utils/String';
import { doSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
import { SnodeSignature } from './signature/snodeSignatures';
import { getNodeFromSwarmOrThrow } from './snodePool';
import { SnodePool } from './snodePool';
export const ERROR_CODE_NO_CONNECT = 'ENETUNREACH: No network connection.';
@ -30,7 +30,7 @@ const forceNetworkDeletion = async (): Promise<Array<string> | null> => {
try {
const maliciousSnodes = await pRetry(
async () => {
const snodeToMakeRequestTo = await getNodeFromSwarmOrThrow(usPk);
const snodeToMakeRequestTo = await SnodePool.getNodeFromSwarmOrThrow(usPk);
return pRetry(
async () => {
@ -39,7 +39,7 @@ const forceNetworkDeletion = async (): Promise<Array<string> | null> => {
namespace,
});
const ret = await doSnodeBatchRequest(
const ret = await BatchRequests.doSnodeBatchRequest(
[{ method, params: { ...signOpts, namespace, pubkey: usPk } }],
snodeToMakeRequestTo,
10000,
@ -190,7 +190,7 @@ const networkDeleteMessages = async (hashes: Array<string>): Promise<Array<strin
try {
const maliciousSnodes = await pRetry(
async () => {
const snodeToMakeRequestTo = await getNodeFromSwarmOrThrow(userX25519PublicKey);
const snodeToMakeRequestTo = await SnodePool.getNodeFromSwarmOrThrow(userX25519PublicKey);
return pRetry(
async () => {
@ -200,7 +200,7 @@ const networkDeleteMessages = async (hashes: Array<string>): Promise<Array<strin
pubkey: userX25519PublicKey,
});
const ret = await doSnodeBatchRequest(
const ret = await BatchRequests.doSnodeBatchRequest(
[{ method, params: signOpts }],
snodeToMakeRequestTo,
10000,

@ -2,7 +2,7 @@ import { isArray } from 'lodash';
import { Snode } from '../../../data/data';
import { MessageSender } from '../../sending';
import { processOnionRequestErrorAtDestination, SnodeResponse } from './onions';
import { snodeRpc } from './sessionRpc';
import { SessionRpc } from './sessionRpc';
import {
builtRequestToLoggingId,
BuiltSnodeSubRequests,
@ -26,7 +26,7 @@ function logSubRequests(requests: Array<BuiltSnodeSubRequests>) {
* @param associatedWith used mostly for handling 421 errors, we need the pubkey the change is associated to
* @param method can be either batch or sequence. A batch call will run all calls even if one of them fails. A sequence call will stop as soon as the first one fails
*/
export async function doSnodeBatchRequest(
async function doSnodeBatchRequest(
subRequests: Array<BuiltSnodeSubRequests>,
targetNode: Snode,
timeout: number,
@ -43,7 +43,7 @@ export async function doSnodeBatchRequest(
`batch subRequests count cannot be more than ${MAX_SUBREQUESTS_COUNT}. Got ${subRequests.length}`
);
}
const result = await snodeRpc({
const result = await SessionRpc.snodeRpc({
method,
params: { requests: subRequests },
targetNode,
@ -76,7 +76,7 @@ export async function doSnodeBatchRequest(
return decoded;
}
export async function doUnsignedSnodeBatchRequest(
async function doUnsignedSnodeBatchRequest(
unsignedSubRequests: Array<RawSnodeSubRequests>,
targetNode: Snode,
timeout: number,
@ -84,7 +84,13 @@ export async function doUnsignedSnodeBatchRequest(
method: MethodBatchType = 'batch'
): Promise<NotEmptyArrayOfBatchResults> {
const signedSubRequests = await MessageSender.signSubRequests(unsignedSubRequests);
return doSnodeBatchRequest(signedSubRequests, targetNode, timeout, associatedWith, method);
return BatchRequests.doSnodeBatchRequest(
signedSubRequests,
targetNode,
timeout,
associatedWith,
method
);
}
/**
@ -113,3 +119,8 @@ function decodeBatchRequest(snodeResponse: SnodeResponse): NotEmptyArrayOfBatchR
}
// "{"results":[{"body":"retrieve signature verification failed","code":401}]}"
}
export const BatchRequests = {
doSnodeBatchRequest,
doUnsignedSnodeBatchRequest,
};

@ -12,8 +12,8 @@ import {
WithShortenOrExtend,
fakeHash,
} from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { getNodeFromSwarmOrThrow } from './snodePool';
import { BatchRequests } from './batchRequest';
import { SnodePool } from './snodePool';
import { ExpireMessageResultItem, ExpireMessagesResultsContent } from './types';
export type verifyExpireMsgsResponseSignatureProps = ExpireMessageResultItem & {
@ -146,7 +146,7 @@ async function updateExpiryOnNodes(
expireRequests: Array<UpdateExpiryOnNodeUserSubRequest>
): Promise<Array<UpdatedExpiryWithHash>> {
try {
const result = await doUnsignedSnodeBatchRequest(
const result = await BatchRequests.doUnsignedSnodeBatchRequest(
expireRequests,
targetNode,
4000,
@ -405,7 +405,7 @@ export async function expireMessagesOnSnode(
expireRequestsParams.map(chunkRequest =>
pRetry(
async () => {
const targetNode = await getNodeFromSwarmOrThrow(ourPubKey);
const targetNode = await SnodePool.getNodeFromSwarmOrThrow(ourPubKey);
return updateExpiryOnNodes(targetNode, ourPubKey, chunkRequest);
},

@ -6,8 +6,8 @@ import { Snode } from '../../../data/data';
import { UserUtils } from '../../utils';
import { SeedNodeAPI } from '../seed_node_api';
import { GetExpiriesFromNodeSubRequest, fakeHash } from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { getNodeFromSwarmOrThrow } from './snodePool';
import { BatchRequests } from './batchRequest';
import { SnodePool } from './snodePool';
import { GetExpiriesResultsContent, WithMessagesHashes } from './types';
export type GetExpiriesRequestResponseResults = Record<string, number>;
@ -46,7 +46,7 @@ async function getExpiriesFromNodes(
) {
try {
const expireRequest = new GetExpiriesFromNodeSubRequest({ messagesHashes: messageHashes });
const result = await doUnsignedSnodeBatchRequest(
const result = await BatchRequests.doUnsignedSnodeBatchRequest(
[expireRequest],
targetNode,
4000,
@ -120,7 +120,7 @@ export async function getExpiriesFromSnode({ messagesHashes }: WithMessagesHashe
try {
const fetchedExpiries = await pRetry(
async () => {
const targetNode = await getNodeFromSwarmOrThrow(ourPubKey);
const targetNode = await SnodePool.getNodeFromSwarmOrThrow(ourPubKey);
return getExpiriesFromNodes(targetNode, messagesHashes, ourPubKey);
},

@ -7,12 +7,12 @@
import { isNumber } from 'lodash';
import { Snode } from '../../../data/data';
import { NetworkTimeSubRequest } from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
const getNetworkTime = async (snode: Snode): Promise<string | number> => {
const subrequest = new NetworkTimeSubRequest();
const result = await doUnsignedSnodeBatchRequest([subrequest], snode, 4000, null);
const result = await BatchRequests.doUnsignedSnodeBatchRequest([subrequest], snode, 4000, null);
if (!result || !result.length) {
window?.log?.warn(`getNetworkTime on ${snode.ip}:${snode.port} returned falsish value`, result);
throw new Error('getNetworkTime: Invalid result');

@ -1,10 +1,9 @@
import { compact, intersectionWith, sampleSize } from 'lodash';
import { SnodePool } from '.';
import { Snode } from '../../../data/data';
import { GetServiceNodesSubRequest } from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
import { GetNetworkTime } from './getNetworkTime';
import { minSnodePoolCount, requiredSnodesForAgreement } from './snodePool';
import { SnodePool } from './snodePool';
/**
* Returns a list of unique snodes got from the specified targetNode.
@ -14,7 +13,12 @@ import { minSnodePoolCount, requiredSnodesForAgreement } from './snodePool';
async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
const subrequest = new GetServiceNodesSubRequest();
const results = await doUnsignedSnodeBatchRequest([subrequest], targetNode, 4000, null);
const results = await BatchRequests.doUnsignedSnodeBatchRequest(
[subrequest],
targetNode,
4000,
null
);
const firstResult = results[0];
@ -58,7 +62,7 @@ async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
*/
async function getSnodePoolFromSnodes() {
const existingSnodePool = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
if (existingSnodePool.length <= minSnodePoolCount) {
if (existingSnodePool.length <= SnodePool.minSnodePoolCount) {
window?.log?.warn(
'getSnodePoolFromSnodes: Cannot get snodes list from snodes; not enough snodes',
existingSnodePool.length
@ -95,9 +99,9 @@ async function getSnodePoolFromSnodes() {
}
);
// We want the snodes to agree on at least this many snodes
if (commonSnodes.length < requiredSnodesForAgreement) {
if (commonSnodes.length < SnodePool.requiredSnodesForAgreement) {
throw new Error(
`Inconsistent snode pools. We did not get at least ${requiredSnodesForAgreement} in common`
`Inconsistent snode pools. We did not get at least ${SnodePool.requiredSnodesForAgreement} in common`
);
}
return commonSnodes;

@ -3,9 +3,9 @@ import pRetry from 'p-retry';
import { Snode } from '../../../data/data';
import { PubKey } from '../../types';
import { SwarmForSubRequest } from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
import { GetNetworkTime } from './getNetworkTime';
import { getRandomSnode } from './snodePool';
import { SnodePool } from './snodePool';
/**
* get snodes for pubkey from random snode. Uses an existing snode
@ -19,7 +19,12 @@ async function requestSnodesForPubkeyWithTargetNodeRetryable(
}
const subrequest = new SwarmForSubRequest(pubkey);
const result = await doUnsignedSnodeBatchRequest([subrequest], targetNode, 4000, pubkey);
const result = await BatchRequests.doUnsignedSnodeBatchRequest(
[subrequest],
targetNode,
4000,
pubkey
);
if (!result || !result.length) {
window?.log?.warn(
@ -87,7 +92,7 @@ async function requestSnodesForPubkeyRetryable(pubKey: string): Promise<Array<Sn
// the idea is that the requestSnodesForPubkeyWithTargetNode will remove a failing targetNode
return pRetry(
async () => {
const targetNode = await getRandomSnode();
const targetNode = await SnodePool.getRandomSnode();
return requestSnodesForPubkeyWithTargetNode(pubKey, targetNode);
},

@ -1,7 +1,6 @@
import * as SnodePool from './snodePool';
import * as SNodeAPI from './SNodeAPI';
import * as Onions from './onions';
import { getSwarmPollingInstance } from './swarmPolling';
export { SnodePool, SNodeAPI, Onions, getSwarmPollingInstance };
export { Onions, SNodeAPI, getSwarmPollingInstance };

@ -9,7 +9,7 @@ import pRetry from 'p-retry';
// eslint-disable-next-line import/no-unresolved
import { AbortSignal as AbortSignalNode } from 'node-fetch/externals';
import { dropSnodeFromSnodePool, dropSnodeFromSwarmIfNeeded, updateSwarmFor } from './snodePool';
import { SnodePool } from './snodePool';
import { OnionPaths } from '../../onions';
import { ed25519Str, incrementBadPathCountOrDrop } from '../../onions/onionPath';
@ -343,10 +343,10 @@ async function handleNodeNotFound({
window?.log?.warn('Handling NODE NOT FOUND with: ', shortNodeNotFound);
if (associatedWith) {
await dropSnodeFromSwarmIfNeeded(associatedWith, ed25519NotFound);
await SnodePool.dropSnodeFromSwarmIfNeeded(associatedWith, ed25519NotFound);
}
await dropSnodeFromSnodePool(ed25519NotFound);
await SnodePool.dropSnodeFromSnodePool(ed25519NotFound);
snodeFailureCount[ed25519NotFound] = 0;
// try to remove the not found snode from any of the paths if it's there.
// it may not be here, as the snode note found might be the target snode of the request.
@ -736,11 +736,11 @@ async function handle421InvalidSwarm({
parsedBody.snodes.map((s: any) => ed25519Str(s.pubkey_ed25519))
);
await updateSwarmFor(associatedWith, parsedBody.snodes);
await SnodePool.updateSwarmFor(associatedWith, parsedBody.snodes);
throw new pRetry.AbortError(ERROR_421_HANDLED_RETRY_REQUEST);
}
// remove this node from the swarm of this pubkey
await dropSnodeFromSwarmIfNeeded(associatedWith, destinationSnodeEd25519);
await SnodePool.dropSnodeFromSwarmIfNeeded(associatedWith, destinationSnodeEd25519);
} catch (e) {
if (e.message !== ERROR_421_HANDLED_RETRY_REQUEST) {
window?.log?.warn(
@ -748,7 +748,7 @@ async function handle421InvalidSwarm({
e
);
// could not parse result. Consider that this snode as invalid
await dropSnodeFromSwarmIfNeeded(associatedWith, destinationSnodeEd25519);
await SnodePool.dropSnodeFromSwarmIfNeeded(associatedWith, destinationSnodeEd25519);
}
}
await Onions.incrementBadSnodeCountOrDrop({
@ -789,9 +789,9 @@ async function incrementBadSnodeCountOrDrop({
);
if (associatedWith) {
await dropSnodeFromSwarmIfNeeded(associatedWith, snodeEd25519);
await SnodePool.dropSnodeFromSwarmIfNeeded(associatedWith, snodeEd25519);
}
await dropSnodeFromSnodePool(snodeEd25519);
await SnodePool.dropSnodeFromSnodePool(snodeEd25519);
snodeFailureCount[snodeEd25519] = 0;
await OnionPaths.dropSnodeFromPath(snodeEd25519);

@ -7,9 +7,9 @@ import {
toHex,
} from '../../utils/String';
import { OnsResolveSubRequest } from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
import { GetNetworkTime } from './getNetworkTime';
import { getRandomSnode } from './snodePool';
import { SnodePool } from './snodePool';
// ONS name can have [a-zA-Z0-9_-] except that - is not allowed as start or end
// do not define a regex but rather create it on the fly to avoid https://stackoverflow.com/questions/3891641/regex-test-only-works-every-other-time
@ -27,9 +27,14 @@ async function getSessionIDForOnsName(onsNameCase: string) {
// we do this request with validationCount snodes
const promises = range(0, validationCount).map(async () => {
const targetNode = await getRandomSnode();
const targetNode = await SnodePool.getRandomSnode();
const results = await doUnsignedSnodeBatchRequest([subRequest], targetNode, 4000, null);
const results = await BatchRequests.doUnsignedSnodeBatchRequest(
[subRequest],
targetNode,
4000,
null
);
const firstResult = results[0];
if (!firstResult || firstResult.code !== 200 || !firstResult.body) {
throw new Error('ONSresolve:Failed to resolve ONS');

@ -15,7 +15,7 @@ import {
UpdateExpiryOnNodeGroupSubRequest,
UpdateExpiryOnNodeUserSubRequest,
} from './SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from './batchRequest';
import { BatchRequests } from './batchRequest';
import { RetrieveMessagesResultsBatched, RetrieveMessagesResultsContent } from './types';
type RetrieveParams = {
@ -206,7 +206,12 @@ async function retrieveNextMessages(
// let exceptions bubble up
// no retry for this one as this a call we do every few seconds while polling for messages
const results = await doUnsignedSnodeBatchRequest(rawRequests, targetNode, 4000, associatedWith);
const results = await BatchRequests.doUnsignedSnodeBatchRequest(
rawRequests,
targetNode,
4000,
associatedWith
);
if (!results || !results.length) {
window?.log?.warn(
`_retrieveNextMessages - sessionRpc could not talk to ${targetNode.ip}:${targetNode.port}`

@ -109,7 +109,7 @@ async function doRequest({
* -> if the targetNode gets too many errors => we will need to try to do this request again with another target node
* The
*/
export async function snodeRpc(
async function snodeRpc(
{
method,
params,
@ -147,3 +147,5 @@ export async function snodeRpc(
timeout,
});
}
export const SessionRpc = { snodeRpc };

@ -3,7 +3,7 @@ import pRetry from 'p-retry';
import { Data, Snode } from '../../../data/data';
import { Onions, SnodePool } from '.';
import { Onions } from '.';
import { OnionPaths } from '../../onions';
import { ed25519Str } from '../../onions/onionPath';
import { SeedNodeAPI } from '../seed_node_api';
@ -19,12 +19,12 @@ const minSwarmSnodeCount = 3;
* If we get less than minSnodePoolCount we consider that we need to fetch the new snode pool from a seed node
* and not from those snodes.
*/
export const minSnodePoolCount = 12;
const minSnodePoolCount = 12;
/**
* If we get less than this amount of snodes (24), lets try to get an updated list from those while we can
*/
export const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
/**
* If we do a request to fetch nodes from snodes and they don't return at least
@ -32,12 +32,12 @@ export const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
*
* Too many nodes are not shared for this call to be trustworthy
*/
export const requiredSnodesForAgreement = 24;
const requiredSnodesForAgreement = 24;
let randomSnodePool: Array<Snode> = [];
export function TEST_resetState() {
randomSnodePool = [];
function TEST_resetState(snodePoolForTest: Array<Snode> = []) {
randomSnodePool = snodePoolForTest;
swarmCache.clear();
}
@ -49,7 +49,7 @@ const swarmCache: Map<string, Array<string>> = new Map();
* Use `dropSnodeFromSwarmIfNeeded` for that
* @param snodeEd25519 the snode ed25519 to drop from the snode pool
*/
export async function dropSnodeFromSnodePool(snodeEd25519: string) {
async function dropSnodeFromSnodePool(snodeEd25519: string) {
const exists = _.some(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
if (exists) {
_.remove(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
@ -67,11 +67,11 @@ export async function dropSnodeFromSnodePool(snodeEd25519: string) {
* excludingEd25519Snode can be used to exclude some nodes from the random list.
* Useful to rebuild a path excluding existing node already in a path
*/
export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Snode> {
async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Snode> {
// make sure we have a few snodes in the pool excluding the one passed as args
const requiredCount = minSnodePoolCount + (excludingEd25519Snode?.length || 0);
const requiredCount = SnodePool.minSnodePoolCount + (excludingEd25519Snode?.length || 0);
if (randomSnodePool.length < requiredCount) {
await getSnodePoolFromDBOrFetchFromSeed(excludingEd25519Snode?.length);
await SnodePool.getSnodePoolFromDBOrFetchFromSeed(excludingEd25519Snode?.length);
if (randomSnodePool.length < requiredCount) {
window?.log?.warn(
@ -103,9 +103,9 @@ export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Pro
* This function force the snode poll to be refreshed from a random seed node or snodes if we have enough of them.
* This should be called once in a day or so for when the app it kept on.
*/
export async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
try {
await getSnodePoolFromDBOrFetchFromSeed();
await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
window?.log?.info(
`forceRefreshRandomSnodePool: enough snodes to fetch from them, so we try using them ${randomSnodePool.length}`
@ -113,7 +113,7 @@ export async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
// this function throws if it does not have enough snodes to do it
await tryToGetConsensusWithSnodesWithRetries();
if (randomSnodePool.length < minSnodePoolCountBeforeRefreshFromSnodes) {
if (randomSnodePool.length < SnodePool.minSnodePoolCountBeforeRefreshFromSnodes) {
throw new Error('forceRefreshRandomSnodePool still too small after refetching from snodes');
}
} catch (e) {
@ -140,15 +140,21 @@ export async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
* Fetches from DB if snode pool is not cached, and returns it if the length is >= 12.
* If length is < 12, fetches from seed an updated list of snodes
*/
export async function getSnodePoolFromDBOrFetchFromSeed(
async function getSnodePoolFromDBOrFetchFromSeed(
countToAddToRequirement = 0
): Promise<Array<Snode>> {
if (randomSnodePool && randomSnodePool.length > minSnodePoolCount + countToAddToRequirement) {
if (
randomSnodePool &&
randomSnodePool.length > SnodePool.minSnodePoolCount + countToAddToRequirement
) {
return randomSnodePool;
}
const fetchedFromDb = await Data.getSnodePoolFromDb();
if (!fetchedFromDb || fetchedFromDb.length <= minSnodePoolCount + countToAddToRequirement) {
if (
!fetchedFromDb ||
fetchedFromDb.length <= SnodePool.minSnodePoolCount + countToAddToRequirement
) {
window?.log?.warn(
`getSnodePoolFromDBOrFetchFromSeed: not enough snodes in db (${fetchedFromDb?.length}), Fetching from seed node instead... `
);
@ -164,9 +170,9 @@ export async function getSnodePoolFromDBOrFetchFromSeed(
return randomSnodePool;
}
export async function getRandomSnodePool(): Promise<Array<Snode>> {
if (randomSnodePool.length <= minSnodePoolCount) {
await getSnodePoolFromDBOrFetchFromSeed();
async function getRandomSnodePool(): Promise<Array<Snode>> {
if (randomSnodePool.length <= SnodePool.minSnodePoolCount) {
await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
}
return randomSnodePool;
}
@ -178,7 +184,7 @@ export async function getRandomSnodePool(): Promise<Array<Snode>> {
* This function does not throw.
*/
export async function TEST_fetchFromSeedWithRetriesAndWriteToDb() {
async function TEST_fetchFromSeedWithRetriesAndWriteToDb() {
const seedNodes = window.getSeedNodeList();
if (!seedNodes || !seedNodes.length) {
@ -217,7 +223,7 @@ async function tryToGetConsensusWithSnodesWithRetries() {
async () => {
const commonNodes = await ServiceNodesList.getSnodePoolFromSnodes();
if (!commonNodes || commonNodes.length < requiredSnodesForAgreement) {
if (!commonNodes || commonNodes.length < SnodePool.requiredSnodesForAgreement) {
// throwing makes trigger a retry if we have some left.
window?.log?.info(
`tryToGetConsensusWithSnodesWithRetries: Not enough common nodes ${commonNodes?.length}`
@ -252,7 +258,7 @@ async function tryToGetConsensusWithSnodesWithRetries() {
* @param pubkey the associatedWith publicKey
* @param snodeToDropEd25519 the snode pubkey to drop
*/
export async function dropSnodeFromSwarmIfNeeded(
async function dropSnodeFromSwarmIfNeeded(
pubkey: string,
snodeToDropEd25519: string
): Promise<void> {
@ -261,7 +267,7 @@ export async function dropSnodeFromSwarmIfNeeded(
`Dropping ${ed25519Str(snodeToDropEd25519)} from swarm of ${ed25519Str(pubkey)}`
);
const existingSwarm = await getSwarmFromCacheOrDb(pubkey);
const existingSwarm = await SnodePool.getSwarmFromCacheOrDb(pubkey);
if (!existingSwarm.includes(snodeToDropEd25519)) {
return;
@ -271,7 +277,7 @@ export async function dropSnodeFromSwarmIfNeeded(
await internalUpdateSwarmFor(pubkey, updatedSwarm);
}
export async function updateSwarmFor(pubkey: string, snodes: Array<Snode>): Promise<void> {
async function updateSwarmFor(pubkey: string, snodes: Array<Snode>): Promise<void> {
const edkeys = snodes.map((sn: Snode) => sn.pubkey_ed25519);
await internalUpdateSwarmFor(pubkey, edkeys);
}
@ -283,7 +289,7 @@ async function internalUpdateSwarmFor(pubkey: string, edkeys: Array<string>) {
await Data.updateSwarmNodesForPubkey(pubkey, edkeys);
}
export async function getSwarmFromCacheOrDb(pubkey: string): Promise<Array<string>> {
async function getSwarmFromCacheOrDb(pubkey: string): Promise<Array<string>> {
// NOTE: important that maybeNodes is not [] here
const existingCache = swarmCache.get(pubkey);
if (existingCache === undefined) {
@ -301,13 +307,12 @@ export async function getSwarmFromCacheOrDb(pubkey: string): Promise<Array<strin
* This call fetch from cache or db the swarm and extract only the one currently reachable.
* If not enough snodes valid are in the swarm, if fetches new snodes for this pubkey from the network.
*/
export async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
const nodes = await getSwarmFromCacheOrDb(pubkey);
async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
const nodes = await SnodePool.getSwarmFromCacheOrDb(pubkey);
// See how many are actually still reachable
// the nodes still reachable are the one still present in the snode pool
const goodNodes = randomSnodePool.filter((n: Snode) => nodes.indexOf(n.pubkey_ed25519) !== -1);
if (goodNodes.length >= minSwarmSnodeCount) {
return goodNodes;
}
@ -316,8 +321,8 @@ export async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
return getSwarmFromNetworkAndSave(pubkey);
}
export async function getNodeFromSwarmOrThrow(pubkey: string): Promise<Snode> {
const swarm = await getSwarmFor(pubkey);
async function getNodeFromSwarmOrThrow(pubkey: string): Promise<Snode> {
const swarm = await SnodePool.getSwarmFor(pubkey);
if (!isEmpty(swarm)) {
const node = sample(swarm);
if (node) {
@ -336,7 +341,7 @@ export async function getNodeFromSwarmOrThrow(pubkey: string): Promise<Snode> {
* @param pubkey the pubkey to request the swarm for
* @returns the fresh swarm, shuffled
*/
export async function getFreshSwarmFor(pubkey: string): Promise<Array<Snode>> {
async function getFreshSwarmFor(pubkey: string): Promise<Array<Snode>> {
return getSwarmFromNetworkAndSave(pubkey);
}
@ -350,3 +355,29 @@ async function getSwarmFromNetworkAndSave(pubkey: string) {
return shuffledSwarm;
}
export const SnodePool = {
// consts
minSnodePoolCount,
minSnodePoolCountBeforeRefreshFromSnodes,
requiredSnodesForAgreement,
// snode pool mgmt
dropSnodeFromSnodePool,
forceRefreshRandomSnodePool,
getRandomSnode,
getRandomSnodePool,
getSnodePoolFromDBOrFetchFromSeed,
// swarm mgmt
dropSnodeFromSwarmIfNeeded,
updateSwarmFor,
getSwarmFromCacheOrDb,
getSwarmFor,
getNodeFromSwarmOrThrow,
getFreshSwarmFor,
// tests
TEST_resetState,
TEST_fetchFromSeedWithRetriesAndWriteToDb,
};

@ -22,7 +22,6 @@ import { SignalService } from '../../../protobuf';
import * as Receiver from '../../../receiver/receiver';
import { PubKey } from '../../types';
import { ERROR_CODE_NO_CONNECT } from './SNodeAPI';
import * as snodePool from './snodePool';
import { ConversationModel } from '../../../models/conversation';
import { ConversationTypeEnum } from '../../../models/conversationAttributes';
@ -44,6 +43,7 @@ import { LibSessionUtil } from '../../utils/libsession/libsession_utils';
import { SnodeNamespace, SnodeNamespaces, SnodeNamespacesUserConfig } from './namespaces';
import { PollForGroup, PollForLegacy, PollForUs } from './pollingTypes';
import { SnodeAPIRetrieve } from './retrieveRequest';
import { SnodePool } from './snodePool';
import { SwarmPollingGroupConfig } from './swarm_polling_config/SwarmPollingGroupConfig';
import { SwarmPollingUserConfig } from './swarm_polling_config/SwarmPollingUserConfig';
import {
@ -339,7 +339,7 @@ export class SwarmPolling {
*/
public async pollOnceForKey([pubkey, type]: PollForUs | PollForLegacy | PollForGroup) {
const namespaces = this.getNamespacesToPollFrom(type);
const swarmSnodes = await snodePool.getSwarmFor(pubkey);
const swarmSnodes = await SnodePool.getSwarmFor(pubkey);
// Select nodes for which we already have lastHashes
const alreadyPolled = swarmSnodes.filter((n: Snode) => this.lastHashes[n.pubkey_ed25519]);

@ -5,7 +5,6 @@ import { MessageParams } from '../Message';
interface TypingMessageParams extends MessageParams {
isTyping: boolean;
typingTimestamp?: number;
}
export class TypingMessage extends ContentMessage {

@ -34,7 +34,8 @@ export class ClosedGroupVisibleMessage extends ClosedGroupMessage {
this.chatMessage = params.chatMessage;
if (
this.chatMessage.expirationType !== 'deleteAfterSend' &&
this.chatMessage.expirationType !== 'unknown'
this.chatMessage.expirationType !== 'unknown' &&
this.chatMessage.expirationType !== null
) {
throw new Error('group visible msg only support DaS and off Disappearing options');
}

@ -11,7 +11,7 @@ import { updateOnionPaths } from '../../state/ducks/onion';
import { APPLICATION_JSON } from '../../types/MIME';
import { ERROR_CODE_NO_CONNECT } from '../apis/snode_api/SNodeAPI';
import { Onions, snodeHttpsAgent } from '../apis/snode_api/onions';
import * as SnodePool from '../apis/snode_api/snodePool';
import { SnodePool } from '../apis/snode_api/snodePool';
import { UserUtils } from '../utils';
import { allowOnlyOneAtATime } from '../utils/Promise';

@ -39,7 +39,7 @@ import {
UpdateExpiryOnNodeGroupSubRequest,
UpdateExpiryOnNodeUserSubRequest,
} from '../apis/snode_api/SnodeRequestTypes';
import { doUnsignedSnodeBatchRequest } from '../apis/snode_api/batchRequest';
import { BatchRequests } from '../apis/snode_api/batchRequest';
import { GetNetworkTime } from '../apis/snode_api/getNetworkTime';
import { SnodeNamespace, SnodeNamespaces } from '../apis/snode_api/namespaces';
import {
@ -48,7 +48,7 @@ import {
SnodeGroupSignature,
} from '../apis/snode_api/signature/groupSignature';
import { SnodeSignature, SnodeSignatureResult } from '../apis/snode_api/signature/snodeSignatures';
import { getNodeFromSwarmOrThrow } from '../apis/snode_api/snodePool';
import { SnodePool } from '../apis/snode_api/snodePool';
import { WithMessagesHashes, WithRevokeSubRequest } from '../apis/snode_api/types';
import { TTL_DEFAULT } from '../constants';
import { ConvoHub } from '../conversations';
@ -129,7 +129,6 @@ async function sendSingleMessage({
found.set({ sent_at: encryptedAndWrapped.networkTimestamp });
await found.commit();
}
const isSyncedDeleteAfterReadMessage =
found &&
UserUtils.isUsFromCache(recipient.key) &&
@ -145,7 +144,6 @@ async function sendSingleMessage({
}
const subRequests: Array<RawSnodeSubRequests> = [];
if (PubKey.is05Pubkey(destination)) {
if (encryptedAndWrapped.namespace === SnodeNamespaces.Default) {
subRequests.push(
@ -229,9 +227,8 @@ async function sendSingleMessage({
);
}
const targetNode = await getNodeFromSwarmOrThrow(destination);
const batchResult = await doUnsignedSnodeBatchRequest(
const targetNode = await SnodePool.getNodeFromSwarmOrThrow(destination);
const batchResult = await BatchRequests.doUnsignedSnodeBatchRequest(
subRequests,
targetNode,
6000,
@ -380,10 +377,10 @@ async function sendMessagesDataToSnode(
unrevokeSubRequest,
]);
const targetNode = await getNodeFromSwarmOrThrow(asssociatedWith);
const targetNode = await SnodePool.getNodeFromSwarmOrThrow(asssociatedWith);
try {
const storeResults = await doUnsignedSnodeBatchRequest(
const storeResults = await BatchRequests.doUnsignedSnodeBatchRequest(
rawRequests,
targetNode,
4000,

@ -31,10 +31,9 @@ function isCommunityToStoreInWrapper(convo: ConversationModel): boolean {
function isLegacyGroupToStoreInWrapper(convo: ConversationModel): boolean {
return (
convo.isGroup() &&
!convo.isPublic() &&
convo.id.startsWith('05') && // new closed groups won't start with 05
PubKey.is05Pubkey(convo.id) && // we only check legacy group here
convo.isActive() &&
!convo.isKickedFromGroup()
!convo.isKickedFromGroup() // we cannot have a left group anymore. We remove it when we leave it.
);
}

@ -265,7 +265,7 @@ describe('SnodeSignature', () => {
});
};
await expect(func()).to.be.rejectedWith(
'generateUpdateExpiryGroupSignature groupPrivKey or groupPk is empty'
'generateUpdateExpiryGroupSignature groupPk is empty'
);
});
@ -284,7 +284,7 @@ describe('SnodeSignature', () => {
});
};
await expect(func()).to.be.rejectedWith(
'generateUpdateExpiryGroupSignature groupPrivKey or groupPk is empty'
'retrieveRequestForGroup: needs either groupSecretKey or authData'
);
});

@ -71,22 +71,38 @@ describe('GetExpiriesRequest', () => {
).to.be.true;
expect(request.params.signature, 'signature should not be empty').to.not.be.empty;
});
it('fails to build a request if our pubkey is missing', async () => {
it('fails to build a request if our pubkey is missing, and throws', async () => {
// Modify the stub behavior for this test only we need to return an unsupported type to simulate a missing pubkey
(getOurPubKeyStrFromCacheStub as any).returns(undefined);
const unsigned = new GetExpiriesFromNodeSubRequest(props);
const request = await unsigned.buildAndSignParameters();
let errorStr = 'fakeerror';
try {
const unsigned = new GetExpiriesFromNodeSubRequest(props);
const request = await unsigned.buildAndSignParameters();
if (request) {
throw new Error('we should not have been able to build a request');
}
} catch (e) {
errorStr = e.message;
}
expect(request, 'should return null').to.be.null;
expect(errorStr).to.be.eq('[GetExpiriesFromNodeSubRequest] No pubkey found');
});
it('fails to build a request if our signature is missing', async () => {
// Modify the stub behavior for this test only we need to return an unsupported type to simulate a missing pubkey
Sinon.stub(SnodeSignature, 'generateGetExpiriesOurSignature').resolves(null);
// TODO audric this should throw debugger
const unsigned = new GetExpiriesFromNodeSubRequest(props);
const request = await unsigned.buildAndSignParameters();
expect(request, 'should return null').to.be.null;
const unsigned = new GetExpiriesFromNodeSubRequest(props);
try {
const request = await unsigned.buildAndSignParameters();
if (request) {
throw new Error('should not be able to build the request');
}
throw new Error('fake error');
} catch (e) {
expect(e.message).to.be.eq(
'[GetExpiriesFromNodeSubRequest] SnodeSignature.generateUpdateExpirySignature returned an empty result messageHash'
);
}
});
});

@ -156,19 +156,22 @@ describe('libsession_metagroup', () => {
describe('members', () => {
it('all fields are accounted for', () => {
const memberCreated = metaGroupWrapper.memberGetOrConstruct(member);
console.info('Object.keys(memberCreated) ', JSON.stringify(Object.keys(memberCreated)));
expect(Object.keys(memberCreated).length).to.be.eq(
8, // if you change this value, also make sure you add a test, testing that new field, below
9, // if you change this value, also make sure you add a test, testing that new field, below
'this test is designed to fail if you need to add tests to test a new field of libsession'
);
});
it('can add member by setting its promoted state', () => {
it('can add member by setting its promoted state, both ok and nok', () => {
metaGroupWrapper.memberSetPromoted(member, false);
expect(metaGroupWrapper.memberGetAll().length).to.be.deep.eq(1);
expect(metaGroupWrapper.memberGetAll()[0]).to.be.deep.eq({
...emptyMember(member),
promoted: true,
promotionPending: true,
promotionFailed: false,
admin: false,
});
metaGroupWrapper.memberSetPromoted(member2, true);
@ -179,15 +182,19 @@ describe('libsession_metagroup', () => {
promoted: true,
promotionFailed: true,
promotionPending: true,
admin: false,
});
// we test the admin: true case below
});
it('can add member by setting its invited state', () => {
it('can add member by setting its invited state, both ok and nok', () => {
metaGroupWrapper.memberSetInvited(member, false); // with invite success
expect(metaGroupWrapper.memberGetAll().length).to.be.deep.eq(1);
expect(metaGroupWrapper.memberGetAll()[0]).to.be.deep.eq({
...emptyMember(member),
invitePending: true,
inviteFailed: false,
});
metaGroupWrapper.memberSetInvited(member2, true); // with invite failed
@ -252,6 +259,20 @@ describe('libsession_metagroup', () => {
expect(metaGroupWrapper.memberGetAll()[0]).to.be.deep.eq(expected);
});
it('can add via admin set', () => {
metaGroupWrapper.memberSetAdmin(member);
expect(metaGroupWrapper.memberGetAll().length).to.be.deep.eq(1);
const expected: GroupMemberGet = {
...emptyMember(member),
admin: true,
promoted: true,
promotionFailed: false,
promotionPending: false,
};
expect(metaGroupWrapper.memberGetAll()[0]).to.be.deep.eq(expected);
});
});
describe('keys', () => {

@ -12,6 +12,7 @@ import {
import { GetNetworkTime } from '../../../../session/apis/snode_api/getNetworkTime';
import { ConvoHub } from '../../../../session/conversations';
import { UserUtils } from '../../../../session/utils';
import { toHex } from '../../../../session/utils/String';
import { SessionUtilUserGroups } from '../../../../session/utils/libsession/libsession_utils_user_groups';
import { TestUtils } from '../../../test-utils';
import { generateFakeECKeyPair, stubWindowLog } from '../../../test-utils/utils';
@ -64,7 +65,7 @@ describe('libsession_user_groups', () => {
const validLegacyGroupArgs = {
...validArgs,
type: ConversationTypeEnum.GROUP,
id: '05123456564',
id: TestUtils.generateFakePubKeyStr(),
} as ConversationAttributes;
it('includes legacy group', () => {
@ -78,14 +79,7 @@ describe('libsession_user_groups', () => {
});
it('exclude legacy group left', () => {
expect(
SessionUtilUserGroups.isUserGroupToStoreInWrapper(
new ConversationModel({
...validLegacyGroupArgs,
left: true,
})
)
).to.be.eq(false);
// we cannot have a left group anymore. It's removed entirely when we leave it
});
it('exclude legacy group kicked', () => {
expect(
@ -168,12 +162,13 @@ describe('libsession_user_groups', () => {
describe('LegacyGroups', () => {
describe('insertGroupsFromDBIntoWrapperAndRefresh', () => {
const asHex = toHex(groupECKeyPair.publicKeyData);
const groupArgs = {
id: groupECKeyPair.publicKeyData.toString(),
id: asHex,
displayNameInProfile: 'Test Group',
expirationMode: 'off',
expireTimer: 0,
members: [groupECKeyPair.publicKeyData.toString()],
members: [asHex],
} as ConversationAttributes;
it('returns wrapper values that match with the inputted group', async () => {

@ -8,6 +8,7 @@ import {
} from '../../../../models/conversationAttributes';
import { UserUtils } from '../../../../session/utils';
import { SessionUtilUserGroups } from '../../../../session/utils/libsession/libsession_utils_user_groups';
import { TestUtils } from '../../../test-utils';
describe('libsession_groups', () => {
describe('filter user groups for wrapper', () => {
@ -46,7 +47,7 @@ describe('libsession_groups', () => {
const validLegacyGroupArgs = {
...validArgs,
type: ConversationTypeEnum.GROUP,
id: '05123456564',
id: TestUtils.generateFakePubKeyStr(),
} as any;
it('includes legacy group', () => {
@ -60,14 +61,7 @@ describe('libsession_groups', () => {
});
it('exclude legacy group left', () => {
expect(
SessionUtilUserGroups.isUserGroupToStoreInWrapper(
new ConversationModel({
...validLegacyGroupArgs,
left: true,
})
)
).to.be.eq(false);
// we cannot have a left group anymore. It's removed entirely when we leave it
});
it('exclude legacy group kicked', () => {
expect(

@ -1,6 +1,5 @@
import { expect } from 'chai';
import { toNumber } from 'lodash';
import Long from 'long';
import { SignalService } from '../../../../protobuf';
import { Constants } from '../../../../session';
@ -33,18 +32,6 @@ describe('TypingMessage', () => {
);
});
it('has typingTimestamp set if value passed', () => {
const message = new TypingMessage({
createAtNetworkTimestamp: Date.now(),
isTyping: true,
typingTimestamp: 111111111,
});
const plainText = message.plainTextBuffer();
const decoded = SignalService.Content.decode(plainText);
const decodedtimestamp = toNumber(decoded.typingMessage?.timestamp);
expect(decodedtimestamp).to.be.equal(111111111);
});
it('has typingTimestamp set with Date.now() if value not passed', () => {
const message = new TypingMessage({
createAtNetworkTimestamp: Date.now(),

@ -40,7 +40,9 @@ describe('ClosedGroupVisibleMessage', () => {
expect(decoded.dataMessage).to.have.deep.property('body', 'body');
// we use the timestamp of the chatMessage as parent timestamp
expect(message).to.have.property('timestamp').to.be.equal(chatMessage.createAtNetworkTimestamp);
expect(message)
.to.have.property('createAtNetworkTimestamp')
.to.be.equal(chatMessage.createAtNetworkTimestamp);
});
it('correct ttl', () => {
@ -75,7 +77,7 @@ describe('ClosedGroupVisibleMessage', () => {
const chatMessage = new VisibleMessage({
createAtNetworkTimestamp,
body: 'body',
identifier: 'chatMessage',
identifier: 'closedGroupMessage',
expirationType: null,
expireTimer: null,
});

@ -1,19 +1,20 @@
import chai from 'chai';
import Sinon, * as sinon from 'sinon';
import { describe } from 'mocha';
import chaiAsPromised from 'chai-as-promised';
import { describe } from 'mocha';
import Sinon, * as sinon from 'sinon';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/apis/snode_api';
import { Snode } from '../../../../data/data';
import { Onions } from '../../../../session/apis/snode_api';
import { TestUtils } from '../../../test-utils';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
import * as OnionPaths from '../../../../session/onions/onionPath';
import {
generateFakeSnodes,
generateFakeSnodeWithEdKey,
stubData,
} from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();

@ -1,24 +1,25 @@
import AbortController from 'abort-controller';
import chai from 'chai';
import Sinon, * as sinon from 'sinon';
import { describe } from 'mocha';
import chaiAsPromised from 'chai-as-promised';
import AbortController from 'abort-controller';
import { describe } from 'mocha';
import Sinon, * as sinon from 'sinon';
import { TestUtils } from '../../../test-utils';
import * as SnodeAPI from '../../../../session/apis/snode_api';
import { TestUtils } from '../../../test-utils';
import { OnionPaths } from '../../../../session/onions';
import { Snode } from '../../../../data/data';
import { SNODE_POOL_ITEM_ID } from '../../../../data/settings-key';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
import { ServiceNodesList } from '../../../../session/apis/snode_api/getServiceNodesList';
import {
NEXT_NODE_NOT_FOUND_PREFIX,
Onions,
OXEN_SERVER_ERROR,
} from '../../../../session/apis/snode_api/onions';
import { Snode } from '../../../../data/data';
import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
import { OnionPaths } from '../../../../session/onions';
import { pathFailureCount } from '../../../../session/onions/onionPath';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
import { generateFakeSnodeWithEdKey, stubData } from '../../../test-utils/utils';
import { ServiceNodesList } from '../../../../session/apis/snode_api/getServiceNodesList';
import { SNODE_POOL_ITEM_ID } from '../../../../data/settings-key';
chai.use(chaiAsPromised as any);
chai.should();
@ -97,8 +98,8 @@ describe('OnionPathsErrors', () => {
updateSwarmSpy = stubData('updateSwarmNodesForPubkey').resolves();
stubData('getItemById').resolves({ id: SNODE_POOL_ITEM_ID, value: '' });
stubData('createOrUpdateItem').resolves();
dropSnodeFromSnodePool = Sinon.spy(SnodeAPI.SnodePool, 'dropSnodeFromSnodePool');
dropSnodeFromSwarmIfNeededSpy = Sinon.spy(SnodeAPI.SnodePool, 'dropSnodeFromSwarmIfNeeded');
dropSnodeFromSnodePool = Sinon.spy(SnodePool, 'dropSnodeFromSnodePool');
dropSnodeFromSwarmIfNeededSpy = Sinon.spy(SnodePool, 'dropSnodeFromSwarmIfNeeded');
dropSnodeFromPathSpy = Sinon.spy(OnionPaths, 'dropSnodeFromPath');
incrementBadPathCountOrDropSpy = Sinon.spy(OnionPaths, 'incrementBadPathCountOrDrop');
incrementBadSnodeCountOrDropSpy = Sinon.spy(Onions, 'incrementBadSnodeCountOrDrop');

@ -1,16 +1,17 @@
import chai from 'chai';
import Sinon from 'sinon';
import chaiAsPromised from 'chai-as-promised';
import { describe } from 'mocha';
import Sinon from 'sinon';
import { Onions } from '../../../../session/apis/snode_api';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/apis/snode_api';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
import { Snode } from '../../../../data/data';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
import { SnodeFromSeed } from '../../../../session/apis/seed_node_api/SeedNodeAPI';
import { Snode } from '../../../../data/data';
import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
chai.use(chaiAsPromised as any);
chai.should();

@ -1,19 +1,20 @@
import chai from 'chai';
import Sinon, * as sinon from 'sinon';
import { describe } from 'mocha';
import chaiAsPromised from 'chai-as-promised';
import { describe } from 'mocha';
import Sinon, * as sinon from 'sinon';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/apis/snode_api';
import { Snode } from '../../../../data/data';
import { Onions } from '../../../../session/apis/snode_api';
import { TestUtils } from '../../../test-utils';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
import * as OnionPaths from '../../../../session/onions/onionPath';
import {
generateFakeSnodes,
generateFakeSnodeWithEdKey,
stubData,
} from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();

@ -25,7 +25,7 @@ import { PendingMessageCacheStub } from '../../../test-utils/stubs';
import { SnodeNamespaces } from '../../../../session/apis/snode_api/namespaces';
import { MessageSentHandler } from '../../../../session/sending/MessageSentHandler';
import { stubData } from '../../../test-utils/utils';
import { TypedStub, stubData } from '../../../test-utils/utils';
chai.use(chaiAsPromised as any);
chai.should();
@ -39,9 +39,23 @@ describe('MessageQueue', () => {
// Initialize new stubbed queue
let pendingMessageCache: PendingMessageCacheStub;
let messageSentHandlerFailedStub: sinon.SinonStub;
let messageSentHandlerSuccessStub: sinon.SinonStub;
let messageSentPublicHandlerSuccessStub: sinon.SinonStub;
let messageSentHandlerFailedStub: TypedStub<
typeof MessageSentHandler,
'handleSwarmMessageSentFailure'
>;
let messageSentHandlerSuccessStub: TypedStub<
typeof MessageSentHandler,
'handleSwarmMessageSentSuccess'
>;
let messageSentPublicHandlerSuccessStub: TypedStub<
typeof MessageSentHandler,
'handlePublicMessageSentSuccess'
>;
let handlePublicMessageSentFailureStub: TypedStub<
typeof MessageSentHandler,
'handlePublicMessageSentFailure'
>;
let messageQueueStub: MessageQueue;
// Message Sender Stubs
@ -65,6 +79,10 @@ describe('MessageQueue', () => {
MessageSentHandler,
'handlePublicMessageSentSuccess'
).resolves();
handlePublicMessageSentFailureStub = Sinon.stub(
MessageSentHandler,
'handlePublicMessageSentFailure'
).resolves();
// Init Queue
pendingMessageCache = new PendingMessageCacheStub();
@ -267,6 +285,7 @@ describe('MessageQueue', () => {
it('should emit a fail event if something went wrong', async () => {
sendToOpenGroupV2Stub.resolves({ serverId: -1, serverTimestamp: -1 });
stubData('getMessageById').resolves();
const message = TestUtils.generateOpenGroupVisibleMessage();
const roomInfos = TestUtils.generateOpenGroupV2RoomInfos();
@ -276,8 +295,8 @@ describe('MessageQueue', () => {
blinded: false,
filesToLink: [],
});
expect(messageSentHandlerFailedStub.callCount).to.equal(1);
expect(messageSentHandlerFailedStub.lastCall.args[0].identifier).to.equal(
expect(handlePublicMessageSentFailureStub.callCount).to.equal(1);
expect(handlePublicMessageSentFailureStub.lastCall.args[0].identifier).to.equal(
message.identifier
);
});

@ -7,6 +7,7 @@ import { SignalService } from '../../../../protobuf';
import { OpenGroupMessageV2 } from '../../../../session/apis/open_group_api/opengroupV2/OpenGroupMessageV2';
import { OpenGroupPollingUtils } from '../../../../session/apis/open_group_api/opengroupV2/OpenGroupPollingUtils';
import { SogsBlinding } from '../../../../session/apis/open_group_api/sogsv3/sogsBlinding';
import { BatchRequests } from '../../../../session/apis/snode_api/batchRequest';
import { GetNetworkTime } from '../../../../session/apis/snode_api/getNetworkTime';
import { SnodeNamespaces } from '../../../../session/apis/snode_api/namespaces';
import { Onions } from '../../../../session/apis/snode_api/onions';
@ -19,7 +20,14 @@ import { OutgoingRawMessage, PubKey } from '../../../../session/types';
import { MessageUtils, UserUtils } from '../../../../session/utils';
import { fromBase64ToArrayBuffer } from '../../../../session/utils/String';
import { TestUtils } from '../../../test-utils';
import { stubCreateObjectUrl, stubData, stubUtilWorker } from '../../../test-utils/utils';
import {
TypedStub,
expectAsyncToThrow,
stubCreateObjectUrl,
stubData,
stubUtilWorker,
stubValidSnodeSwarm,
} from '../../../test-utils/utils';
import { TEST_identityKeyPair } from '../crypto/MessageEncrypter_test';
describe('MessageSender', () => {
@ -40,12 +48,13 @@ describe('MessageSender', () => {
describe('send', () => {
const ourNumber = TestUtils.generateFakePubKeyStr();
let sessionMessageAPISendStub: sinon.SinonStub<any>;
let sessionMessageAPISendStub: TypedStub<typeof MessageSender, 'sendMessagesDataToSnode'>;
let doSnodeBatchRequestStub: TypedStub<typeof BatchRequests, 'doSnodeBatchRequest'>;
let encryptStub: sinon.SinonStub<[PubKey, Uint8Array, SignalService.Envelope.Type]>;
beforeEach(() => {
sessionMessageAPISendStub = Sinon.stub(MessageSender, 'sendMessagesDataToSnode').resolves();
doSnodeBatchRequestStub = Sinon.stub(BatchRequests, 'doSnodeBatchRequest').resolves();
stubData('getMessageById').resolves();
encryptStub = Sinon.stub(MessageEncrypter, 'encrypt').resolves({
@ -68,29 +77,34 @@ describe('MessageSender', () => {
});
it('should not retry if an error occurred during encryption', async () => {
encryptStub.throws(new Error('Failed to encrypt.'));
const promise = MessageSender.sendSingleMessage({
message: rawMessage,
attempts: 3,
retryMinTimeout: 10,
isSyncMessage: false,
});
await expect(promise).is.rejectedWith('Failed to encrypt.');
encryptStub.throws(new Error('Failed to encrypt'));
const promise = () =>
MessageSender.sendSingleMessage({
message: rawMessage,
attempts: 3,
retryMinTimeout: 10,
isSyncMessage: false,
});
await expectAsyncToThrow(promise, 'Failed to encrypt');
expect(sessionMessageAPISendStub.callCount).to.equal(0);
});
it('should only call lokiMessageAPI once if no errors occured', async () => {
stubValidSnodeSwarm();
await MessageSender.sendSingleMessage({
message: rawMessage,
attempts: 3,
retryMinTimeout: 10,
isSyncMessage: false,
});
expect(sessionMessageAPISendStub.callCount).to.equal(1);
expect(doSnodeBatchRequestStub.callCount).to.equal(1);
});
it('should only retry the specified amount of times before throwing', async () => {
sessionMessageAPISendStub.throws(new Error('API error'));
stubValidSnodeSwarm();
doSnodeBatchRequestStub.throws(new Error('API error'));
const attempts = 2;
const promise = MessageSender.sendSingleMessage({
message: rawMessage,
@ -99,18 +113,19 @@ describe('MessageSender', () => {
isSyncMessage: false,
});
await expect(promise).is.rejectedWith('API error');
expect(sessionMessageAPISendStub.callCount).to.equal(attempts);
expect(doSnodeBatchRequestStub.callCount).to.equal(attempts);
});
it('should not throw error if successful send occurs within the retry limit', async () => {
sessionMessageAPISendStub.onFirstCall().throws(new Error('API error'));
stubValidSnodeSwarm();
doSnodeBatchRequestStub.onFirstCall().throws(new Error('API error'));
await MessageSender.sendSingleMessage({
message: rawMessage,
attempts: 3,
retryMinTimeout: 10,
isSyncMessage: false,
});
expect(sessionMessageAPISendStub.callCount).to.equal(2);
expect(doSnodeBatchRequestStub.callCount).to.equal(2);
});
});
@ -125,6 +140,8 @@ describe('MessageSender', () => {
});
it('should pass the correct values to lokiMessageAPI', async () => {
TestUtils.setupTestWithSending();
const device = TestUtils.generateFakePubKey();
const visibleMessage = TestUtils.generateVisibleMessage();
Sinon.stub(ConvoHub.use(), 'get').returns(undefined as any);
@ -142,17 +159,27 @@ describe('MessageSender', () => {
isSyncMessage: false,
});
const args = sessionMessageAPISendStub.getCall(0).args;
expect(args[1]).to.equal(device.key);
const args = doSnodeBatchRequestStub.getCall(0).args;
expect(args[3]).to.equal(device.key);
const firstArg = args[0];
expect(firstArg.length).to.equal(1);
if (firstArg[0].method !== 'store') {
throw new Error('expected a store request with data');
}
// expect(args[3]).to.equal(visibleMessage.timestamp); the timestamp is overwritten on sending by the network clock offset
expect(firstArg[0].ttl).to.equal(visibleMessage.ttl());
expect(firstArg[0].pubkey).to.equal(device.key);
expect(firstArg[0].namespace).to.equal(SnodeNamespaces.Default);
expect(firstArg[0].params.ttl).to.equal(visibleMessage.ttl());
expect(firstArg[0].params.pubkey).to.equal(device.key);
expect(firstArg[0].params.namespace).to.equal(SnodeNamespaces.Default);
// the request timestamp is always used fresh with the offset as the request will be denied with a 406 otherwise (clock out of sync)
expect(firstArg[0].params.timestamp).to.be.above(Date.now() - 10);
expect(firstArg[0].params.timestamp).to.be.below(Date.now() + 10);
});
it('should correctly build the envelope and override the timestamp', async () => {
it('should correctly build the envelope and override the request timestamp but not the msg one', async () => {
TestUtils.setupTestWithSending();
messageEncyrptReturnEnvelopeType = SignalService.Envelope.Type.SESSION_MESSAGE;
// This test assumes the encryption stub returns the plainText passed into it.
@ -173,9 +200,12 @@ describe('MessageSender', () => {
isSyncMessage: false,
});
const firstArg = sessionMessageAPISendStub.getCall(0).args[0];
const { data64 } = firstArg[0];
const data = fromBase64ToArrayBuffer(data64);
const firstArg = doSnodeBatchRequestStub.getCall(0).args[0];
if (firstArg[0].method !== 'store') {
throw new Error('expected a store request with data');
}
const data = fromBase64ToArrayBuffer(firstArg[0].params.data);
const webSocketMessage = SignalService.WebSocketMessage.decode(new Uint8Array(data));
expect(webSocketMessage.request?.body).to.not.equal(
undefined,
@ -192,33 +222,22 @@ describe('MessageSender', () => {
expect(envelope.type).to.equal(SignalService.Envelope.Type.SESSION_MESSAGE);
expect(envelope.source).to.equal('');
// the timestamp is overridden on sending with the network offset
const expectedTimestamp = Date.now() - offset;
// the timestamp in the message is not overridden on sending as it should be set with the network offset when created.
// we need that timestamp to not be overriden as the signature of the message depends on it.
const decodedTimestampFromSending = _.toNumber(envelope.timestamp);
expect(decodedTimestampFromSending).to.be.above(expectedTimestamp - 10);
expect(decodedTimestampFromSending).to.be.below(expectedTimestamp + 10);
expect(decodedTimestampFromSending).to.be.eq(visibleMessage.createAtNetworkTimestamp);
// then make sure the plaintextBuffer was overridden too
const visibleMessageExpected = TestUtils.generateVisibleMessage({
timestamp: decodedTimestampFromSending,
});
const rawMessageExpected = await MessageUtils.toRawMessage(
device,
visibleMessageExpected,
0
);
expect(envelope.content).to.deep.equal(rawMessageExpected.plainTextBuffer);
// then, make sure that
});
describe('SESSION_MESSAGE', () => {
it('should set the envelope source to be empty', async () => {
TestUtils.setupTestWithSending();
messageEncyrptReturnEnvelopeType = SignalService.Envelope.Type.SESSION_MESSAGE;
Sinon.stub(ConvoHub.use(), 'get').returns(undefined as any);
// This test assumes the encryption stub returns the plainText passed into it.
const device = TestUtils.generateFakePubKey();
const visibleMessage = TestUtils.generateVisibleMessage();
const rawMessage = await MessageUtils.toRawMessage(
device,
@ -232,9 +251,12 @@ describe('MessageSender', () => {
isSyncMessage: false,
});
const firstArg = sessionMessageAPISendStub.getCall(0).args[0];
const { data64 } = firstArg[0];
const data = fromBase64ToArrayBuffer(data64);
const firstArg = doSnodeBatchRequestStub.getCall(0).args[0];
if (firstArg[0].method !== 'store') {
throw new Error('expected a store request with data');
}
const data = fromBase64ToArrayBuffer(firstArg[0].params.data);
const webSocketMessage = SignalService.WebSocketMessage.decode(new Uint8Array(data));
expect(webSocketMessage.request?.body).to.not.equal(
undefined,

@ -2,12 +2,12 @@
/* eslint-disable no-await-in-loop */
/* eslint-disable no-restricted-syntax */
import { expect } from 'chai';
import Sinon from 'sinon';
import * as _ from 'lodash';
import Sinon from 'sinon';
import { SnodeNamespaces } from '../../../../session/apis/snode_api/namespaces';
import { PendingMessageCache } from '../../../../session/sending/PendingMessageCache';
import { MessageUtils } from '../../../../session/utils';
import { TestUtils } from '../../../test-utils';
import { PendingMessageCache } from '../../../../session/sending/PendingMessageCache';
import { SnodeNamespaces } from '../../../../session/apis/snode_api/namespaces';
// Equivalent to Data.StorageItem
interface StorageItem {
@ -300,8 +300,8 @@ describe('PendingMessageCache', () => {
expect(buffersCompare).to.equal(true, 'buffers were not loaded properly from database');
// Compare all other values
const trimmedAdded = _.omit(addedMessage, ['plainTextBuffer']);
const trimmedRebuilt = _.omit(message, ['plainTextBuffer']);
const trimmedAdded = _.omit(addedMessage, ['plainTextBuffer', 'plainTextBufferHex']);
const trimmedRebuilt = _.omit(message, ['plainTextBuffer', 'plainTextBufferHex']);
expect(_.isEqual(trimmedAdded, trimmedRebuilt)).to.equal(
true,

@ -32,6 +32,7 @@ describe('SwarmPolling:getNamespacesToPollFrom', () => {
it('for group v2 (03 prefix) ', () => {
expect(swarmPolling.getNamespacesToPollFrom(ConversationTypeEnum.GROUPV2)).to.deep.equal([
SnodeNamespaces.ClosedGroupRevokedRetrievableMessages,
SnodeNamespaces.ClosedGroupMessages,
SnodeNamespaces.ClosedGroupInfo,
SnodeNamespaces.ClosedGroupMembers,

@ -10,9 +10,10 @@ import {
} from 'libsession_util_nodejs';
import { ConversationModel, Convo } from '../../../../models/conversation';
import { ConversationTypeEnum } from '../../../../models/conversationAttributes';
import { SnodePool, getSwarmPollingInstance } from '../../../../session/apis/snode_api';
import { getSwarmPollingInstance } from '../../../../session/apis/snode_api';
import { resetHardForkCachedValues } from '../../../../session/apis/snode_api/hfHandling';
import { SnodeAPIRetrieve } from '../../../../session/apis/snode_api/retrieveRequest';
import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
import { SwarmPolling } from '../../../../session/apis/snode_api/swarmPolling';
import { ConvoHub } from '../../../../session/conversations';
import { PubKey } from '../../../../session/types';

@ -42,7 +42,7 @@ describe('Message Utils', () => {
SnodeNamespaces.UserContacts
);
expect(Object.keys(rawMessage)).to.have.length(6);
expect(Object.keys(rawMessage)).to.have.length(7);
expect(rawMessage.identifier).to.exist;
expect(rawMessage.namespace).to.exist;
@ -50,12 +50,14 @@ describe('Message Utils', () => {
expect(rawMessage.encryption).to.exist;
expect(rawMessage.plainTextBuffer).to.exist;
expect(rawMessage.ttl).to.exist;
expect(rawMessage.networkTimestampCreated).to.exist;
expect(rawMessage.identifier).to.equal(message.identifier);
expect(rawMessage.device).to.equal(device.key);
expect(rawMessage.plainTextBuffer).to.deep.equal(message.plainTextBuffer());
expect(rawMessage.ttl).to.equal(message.ttl());
expect(rawMessage.namespace).to.equal(3);
expect(rawMessage.networkTimestampCreated).to.eq(message.createAtNetworkTimestamp);
});
it('should generate valid plainTextBuffer', async () => {

@ -271,13 +271,14 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
});
it('call savesDumpToDb even if no changes are required on the serverside', async () => {
pendingChangesForGroupStub.resolves({ allOldHashes: new Set(), messages: [] });
const result = await GroupSync.pushChangesToGroupSwarmIfNeeded({
groupPk,
revokeSubRequest: null,
unrevokeSubRequest: null,
supplementKeys: [],
});
pendingChangesForGroupStub.resolves(undefined);
expect(result).to.be.eq(RunJobResult.Success);
expect(sendStub.callCount).to.be.eq(0);
expect(pendingChangesForGroupStub.callCount).to.be.eq(1);
@ -286,6 +287,8 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
});
it('calls sendEncryptedDataToSnode with the right data and retry if network returned nothing', async () => {
TestUtils.stubLibSessionWorker(undefined);
const info = validInfo(sodium);
const member = validMembers(sodium);
const networkTimestamp = 4444;
@ -311,24 +314,34 @@ describe('GroupSyncJob pushChangesToGroupSwarmIfNeeded', () => {
function expected(details: any) {
return {
dbMessageIdentifier: null,
namespace: details.namespace,
data: details.ciphertext,
ttl,
networkTimestamp,
pubkey: groupPk,
encryptedData: details.ciphertext,
ttlMs: ttl,
destination: groupPk,
method: 'store',
};
}
const expectedInfo = expected(info);
const expectedMember = expected(member);
expect(sendStub.firstCall.args).to.be.deep.eq([
[expectedInfo, expectedMember],
groupPk,
new Set('123'),
]);
const callArgs = sendStub.firstCall.args[0];
// we don't want to check the content of the request in this unit test, just the structure/count of them
// callArgs.storeRequests = callArgs.storeRequests.map(_m => null) as any;
const expectedArgs = {
storeRequests: [expectedInfo, expectedMember],
destination: groupPk,
messagesHashesToDelete: new Set('123'),
unrevokeSubRequest: null,
revokeSubRequest: null,
};
expect(callArgs).to.be.deep.eq(expectedArgs);
});
it('calls sendEncryptedDataToSnode with the right data (and keys) and retry if network returned nothing', async () => {
TestUtils.stubLibSessionWorker(undefined);
const info = validInfo(sodium);
const member = validMembers(sodium);
const keys = validKeys(sodium);

@ -299,20 +299,27 @@ describe('UserSyncJob pushChangesToUserSwarmIfNeeded', () => {
function expected(details: any) {
return {
namespace: details.namespace,
data: details.ciphertext,
ttl,
networkTimestamp,
pubkey: sessionId,
encryptedData: details.ciphertext,
ttlMs: ttl,
destination: sessionId,
method: 'store',
};
}
const expectedProfile = expected(profile);
const expectedContact = expected(contact);
expect(sendStub.firstCall.args).to.be.deep.eq([
[expectedProfile, expectedContact],
sessionId,
new Set('123'),
]);
const callArgs = sendStub.firstCall.args[0];
// we don't want to check the content of the request in this unit test, just the structure/count of them
const expectedArgs = {
storeRequests: [expectedProfile, expectedContact],
destination: sessionId,
messagesHashesToDelete: new Set('123'),
unrevokeSubRequest: null,
revokeSubRequest: null,
};
// callArgs.storeRequests = callArgs.storeRequests.map(_m => null) as any;
expect(callArgs).to.be.deep.eq(expectedArgs);
});
it('calls sendEncryptedDataToSnode with the right data x3 and retry if network returned nothing then success', async () => {

@ -5,8 +5,10 @@ import _ from 'lodash';
import { Snode } from '../../../data/data';
import { getSodiumNode } from '../../../node/sodiumNode';
import { ECKeyPair } from '../../../receiver/keypairs';
import { SnodePool } from '../../../session/apis/snode_api/snodePool';
import { PubKey } from '../../../session/types';
import { ByteKeyPair } from '../../../session/utils/User';
import { stubData } from './stubbing';
export function generateFakePubKey(): PubKey {
// Generates a mock pubkey for testing
@ -91,9 +93,13 @@ export function generateFakePubKeys(amount: number): Array<PubKey> {
return new Array(numPubKeys).fill(0).map(() => generateFakePubKey());
}
/**
 * Generates a fake swarm for testing: six random pubkeys, returned as
 * their bare key strings.
 */
export function generateFakeSwarmFor(): Array<string> {
  const fakeSwarmMembers = generateFakePubKeys(6);
  return fakeSwarmMembers.map(pubkey => pubkey.key);
}
export function generateFakeSnode(): Snode {
return {
ip: `136.243.${Math.random() * 255}.${Math.random() * 255}`,
ip: `136.243.${Math.floor(Math.random() * 255)}.${Math.floor(Math.random() * 255)}`,
port: 22116,
pubkey_x25519: generateFakePubKeyStr(),
pubkey_ed25519: generateFakePubKeyStr(),
@ -113,3 +119,15 @@ export function generateFakeSnodes(amount: number): Array<Snode> {
const ar: Array<Snode> = _.times(amount, generateFakeSnode);
return ar;
}
/**
 * Seeds the snode pool with 20 fake snodes and stubs the swarm lookup,
 * for unit tests which rely on fetching a snodepool before sending.
 *
 * @returns the seeded `snodes` and the 6-snode `swarm` the stub resolves.
 */
export function setupTestWithSending() {
  const snodes = generateFakeSnodes(20);
  // reset the pool state first; the swarm is just the first six of the pool
  SnodePool.TEST_resetState(snodes);
  const swarm = snodes.slice(0, 6);
  const swarmEd25519Keys = swarm.map(snode => snode.pubkey_ed25519);
  stubData('getSwarmNodesForPubkey').resolves(swarmEd25519Keys);
  return { snodes, swarm };
}

@ -6,6 +6,8 @@ import { ConfigDumpData } from '../../../data/configDump/configDump';
import { Data } from '../../../data/data';
import { OpenGroupData } from '../../../data/opengroups';
import { TestUtils } from '..';
import { SnodePool } from '../../../session/apis/snode_api/snodePool';
import { BlockedNumberController } from '../../../util';
import * as libsessionWorker from '../../../webworker/workers/browser/libsession_worker_interface';
import * as utilWorker from '../../../webworker/workers/browser/util_worker_interface';
@ -122,3 +124,13 @@ export type TypedStub<T extends Record<string, unknown>, K extends keyof T> = T[
) => any
? Sinon.SinonStub<Parameters<T[K]>, ReturnType<T[K]>>
: never;
/**
 * Seeds the snode pool with 20 fake snodes and stubs `SnodePool.getSwarmFor`
 * so it resolves the first six of them as the swarm.
 *
 * @returns the seeded `snodes` and the 6-snode `swarm` the stub resolves.
 */
export function stubValidSnodeSwarm() {
  const snodes = TestUtils.generateFakeSnodes(20);
  SnodePool.TEST_resetState(snodes);
  // the swarm is just the first six snodes of the freshly seeded pool
  const swarm = snodes.slice(0, 6);
  Sinon.stub(SnodePool, 'getSwarmFor').resolves(swarm);
  return { snodes, swarm };
}

Loading…
Cancel
Save