fix: able to send 1o1 messages back

pull/3052/head
Audric Ackermann 7 months ago
parent 745a20d1bc
commit 3cd7d3272b
No known key found for this signature in database

@@ -20,7 +20,6 @@ import {
 import { DisappearingMessageConversationModeType } from 'libsession_util_nodejs';
 import { v4 } from 'uuid';
 import { SignalService } from '../protobuf';
-import { getMessageQueue } from '../session';
 import { ConvoHub } from '../session/conversations';
 import {
   ClosedGroupV2VisibleMessage,
@@ -139,6 +138,7 @@ import { markAttributesAsReadIfNeeded } from './messageFactory';
 import { StoreGroupRequestFactory } from '../session/apis/snode_api/factories/StoreGroupRequestFactory';
 import { OpenGroupRequestCommonType } from '../data/types';
 import { ConversationTypeEnum, CONVERSATION_PRIORITIES } from './types';
+import { getMessageQueue } from '../session/sending';

 type InMemoryConvoInfos = {
   mentionedUs: boolean;
@@ -921,15 +921,15 @@ export class ConversationModel extends Backbone.Model<ConversationAttributes> {
    * - ignores a off setting for a legacy group (as we can get a setting from restored from configMessage, and a new group can still be in the swarm when linking a device
    */
     const shouldAddExpireUpdateMsgLegacyGroup =
-      fromCurrentDevice || (
-      isLegacyGroup &&
+      fromCurrentDevice ||
+      (isLegacyGroup &&
         !fromConfigMessage &&
-        (expirationMode !== this.get('expirationMode') || expireTimer !== this.get('expireTimer')) &&
+        (expirationMode !== this.get('expirationMode') ||
+          expireTimer !== this.get('expireTimer')) &&
         expirationMode !== 'off');

     const shouldAddExpireUpdateMsgGroupV2 = this.isClosedGroupV2() && !fromConfigMessage;

     const shouldAddExpireUpdateMessage =
       shouldAddExpireUpdateMsgPrivate ||
       shouldAddExpireUpdateMsgLegacyGroup ||

@@ -4,6 +4,7 @@ import { GetNetworkTime } from './getNetworkTime';
 import { SnodePool } from './snodePool';
 import { Snode } from '../../../data/types';
 import { GetServiceNodesSubRequest } from './SnodeRequestTypes';
+import { SnodePoolConstants } from './snodePoolConstants';

 /**
  * Returns a list of unique snodes got from the specified targetNode.
@@ -64,7 +65,7 @@ async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
  */
 async function getSnodePoolFromSnodes() {
   const existingSnodePool = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
-  if (existingSnodePool.length <= SnodePool.minSnodePoolCount) {
+  if (existingSnodePool.length <= SnodePoolConstants.minSnodePoolCount) {
     window?.log?.warn(
       'getSnodePoolFromSnodes: Cannot get snodes list from snodes; not enough snodes',
       existingSnodePool.length
@@ -101,9 +102,9 @@ async function getSnodePoolFromSnodes() {
     }
   );
   // We want the snodes to agree on at least this many snodes
-  if (commonSnodes.length < SnodePool.requiredSnodesForAgreement) {
+  if (commonSnodes.length < SnodePoolConstants.requiredSnodesForAgreement) {
     throw new Error(
-      `Inconsistent snode pools. We did not get at least ${SnodePool.requiredSnodesForAgreement} in common`
+      `Inconsistent snode pools. We did not get at least ${SnodePoolConstants.requiredSnodesForAgreement} in common`
     );
   }
   return commonSnodes;

@@ -7,8 +7,6 @@ import { SnodePool } from './snodePool';
 import { Snode } from '../../../data/types';
 import { SwarmForSubRequest } from './SnodeRequestTypes';

 /**
  * get snodes for pubkey from random snode. Uses an existing snode
  */
@@ -97,6 +95,10 @@ async function requestSnodesForPubkeyRetryable(pubKey: string): Promise<Array<Sn
     async () => {
       const targetNode = await SnodePool.getRandomSnode();
+      if (!targetNode) {
+        debugger;
+      }
       return requestSnodesForPubkeyWithTargetNode(pubKey, targetNode);
     },
     {

@@ -130,6 +130,7 @@ async function snodeRpcNoRetries(
     allow401s: boolean;
   } // the user pubkey this call is for. if the onion request fails, this is used to handle the error for this user swarm for instance
 ): Promise<undefined | SnodeResponse> {
   const url = `https://${targetNode.ip}:${targetNode.port}/storage_rpc/v1`;

   const body = {

@@ -10,32 +10,8 @@ import { ServiceNodesList } from './getServiceNodesList';
 import { requestSnodesForPubkeyFromNetwork } from './getSwarmFor';
 import { Onions } from '.';
 import { ed25519Str } from '../../utils/String';
-import { minimumGuardCount, ONION_REQUEST_HOPS } from '../../onions/onionPath';
-
-/**
- * If we get less than this snode in a swarm, we fetch new snodes for this pubkey
- */
-const minSwarmSnodeCount = 3;
-
-/**
- * If we get less than minSnodePoolCount we consider that we need to fetch the new snode pool from a seed node
- * and not from those snodes.
- */
-export const minSnodePoolCount = minimumGuardCount * (ONION_REQUEST_HOPS + 1) * 2;
-
-/**
- * If we get less than this amount of snodes (24), lets try to get an updated list from those while we can
- */
-const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
-
-/**
- * If we do a request to fetch nodes from snodes and they don't return at least
- * the same `requiredSnodesForAgreement` snodes we consider that this is not a valid return.
- *
- * Too many nodes are not shared for this call to be trustworthy
- */
-const requiredSnodesForAgreement = 24;
+import { SnodePoolConstants } from './snodePoolConstants';

 let randomSnodePool: Array<Snode> = [];
@@ -72,7 +48,8 @@ async function dropSnodeFromSnodePool(snodeEd25519: string) {
  */
 async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Snode> {
   // make sure we have a few snodes in the pool excluding the one passed as args
-  const requiredCount = SnodePool.minSnodePoolCount + (excludingEd25519Snode?.length || 0);
+  const requiredCount = SnodePoolConstants.minSnodePoolCount + (excludingEd25519Snode?.length || 0);
+  debugger;
   if (randomSnodePool.length < requiredCount) {
     await SnodePool.getSnodePoolFromDBOrFetchFromSeed(excludingEd25519Snode?.length);
@@ -88,7 +65,12 @@ async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Sn
   }
   // We know the pool can't be empty at this point
   if (!excludingEd25519Snode) {
-    return _.sample(randomSnodePool) as Snode;
+    const snodePicked = sample(randomSnodePool);
+    if (!snodePicked) {
+      console.warn('randomSnodePool', randomSnodePool);
+      throw new Error('getRandomSnode failed as sample returned none ');
+    }
+    return snodePicked;
   }
   // we have to double check even after removing the nodes to exclude we still have some nodes in the list
@@ -99,7 +81,11 @@ async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Sn
     // used for tests
     throw new Error(`Not enough snodes with excluding length ${excludingEd25519Snode.length}`);
   }
-  return _.sample(snodePoolExcluding) as Snode;
+  const snodePicked = sample(snodePoolExcluding);
+  if (!snodePicked) {
+    throw new Error('getRandomSnode failed as sample returned none ');
+  }
+  return snodePicked;
 }

 /**
@@ -116,7 +102,7 @@ async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
       // this function throws if it does not have enough snodes to do it
       await tryToGetConsensusWithSnodesWithRetries();
-      if (randomSnodePool.length < SnodePool.minSnodePoolCountBeforeRefreshFromSnodes) {
+      if (randomSnodePool.length < SnodePoolConstants.minSnodePoolCountBeforeRefreshFromSnodes) {
        throw new Error('forceRefreshRandomSnodePool still too small after refetching from snodes');
      }
    } catch (e) {
@@ -148,7 +134,7 @@ async function getSnodePoolFromDBOrFetchFromSeed(
 ): Promise<Array<Snode>> {
   if (
     randomSnodePool &&
-    randomSnodePool.length > SnodePool.minSnodePoolCount + countToAddToRequirement
+    randomSnodePool.length > SnodePoolConstants.minSnodePoolCount + countToAddToRequirement
   ) {
     return randomSnodePool;
   }
@@ -156,7 +142,7 @@ async function getSnodePoolFromDBOrFetchFromSeed(
   if (
     !fetchedFromDb ||
-    fetchedFromDb.length <= SnodePool.minSnodePoolCount + countToAddToRequirement
+    fetchedFromDb.length <= SnodePoolConstants.minSnodePoolCount + countToAddToRequirement
   ) {
     window?.log?.warn(
       `getSnodePoolFromDBOrFetchFromSeed: not enough snodes in db (${fetchedFromDb?.length}), Fetching from seed node instead... `
@@ -174,7 +160,7 @@ async function getSnodePoolFromDBOrFetchFromSeed(
 }

 async function getRandomSnodePool(): Promise<Array<Snode>> {
-  if (randomSnodePool.length <= SnodePool.minSnodePoolCount) {
+  if (randomSnodePool.length <= SnodePoolConstants.minSnodePoolCount) {
     await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
   }
   return randomSnodePool;
@@ -238,7 +224,7 @@ async function tryToGetConsensusWithSnodesWithRetries() {
     async () => {
       const commonNodes = await ServiceNodesList.getSnodePoolFromSnodes();
-      if (!commonNodes || commonNodes.length < SnodePool.requiredSnodesForAgreement) {
+      if (!commonNodes || commonNodes.length < SnodePoolConstants.requiredSnodesForAgreement) {
         // throwing makes trigger a retry if we have some left.
         window?.log?.info(
           `tryToGetConsensusWithSnodesWithRetries: Not enough common nodes ${commonNodes?.length}`
@@ -329,7 +315,7 @@ async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
   // See how many are actually still reachable
   // the nodes still reachable are the one still present in the snode pool
   const goodNodes = randomSnodePool.filter((n: Snode) => nodes.indexOf(n.pubkey_ed25519) !== -1);
-  if (goodNodes.length >= minSwarmSnodeCount) {
+  if (goodNodes.length >= SnodePoolConstants.minSwarmSnodeCount) {
     return goodNodes;
   }
@@ -373,11 +359,6 @@ async function getSwarmFromNetworkAndSave(pubkey: string) {
 }

 export const SnodePool = {
-  // constants
-  minSnodePoolCount,
-  minSnodePoolCountBeforeRefreshFromSnodes,
-  requiredSnodesForAgreement,
-
   // snode pool
   dropSnodeFromSnodePool,
   forceRefreshRandomSnodePool,
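
Note on the getRandomSnode changes above: lodash's sample() is typed as T | undefined, so the previous "_.sample(...) as Snode" casts could silently hand undefined to callers whenever the pool was empty. Below is a minimal standalone sketch of the same guard written as a reusable helper; sampleOrThrow is hypothetical and not part of this commit.

import { sample } from 'lodash';

// Hypothetical helper illustrating the guard introduced in getRandomSnode():
// fail loudly instead of letting undefined flow into code typed for Snode.
function sampleOrThrow<T>(pool: Array<T>, label: string): T {
  const picked = sample(pool);
  if (!picked) {
    throw new Error(`${label}: sample returned none`);
  }
  return picked;
}

// usage sketch: const targetNode = sampleOrThrow(randomSnodePool, 'getRandomSnode');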

@@ -0,0 +1,34 @@
+import { minimumGuardCount, ONION_REQUEST_HOPS } from '../../onions/onionPathConstants';
+
+/**
+ * If we get less than this snode in a swarm, we fetch new snodes for this pubkey
+ */
+const minSwarmSnodeCount = 3;
+
+/**
+ * If we get less than minSnodePoolCount we consider that we need to fetch the new snode pool from a seed node
+ * and not from those snodes.
+ */
+export const minSnodePoolCount = minimumGuardCount * (ONION_REQUEST_HOPS + 1) * 2;
+
+/**
+ * If we get less than this amount of snodes (24), lets try to get an updated list from those while we can
+ */
+const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
+
+/**
+ * If we do a request to fetch nodes from snodes and they don't return at least
+ * the same `requiredSnodesForAgreement` snodes we consider that this is not a valid return.
+ *
+ * Too many nodes are not shared for this call to be trustworthy
+ */
+const requiredSnodesForAgreement = 24;
+
+export const SnodePoolConstants = {
+  // constants
+  minSnodePoolCount,
+  minSnodePoolCountBeforeRefreshFromSnodes,
+  requiredSnodesForAgreement,
+  minSwarmSnodeCount,
+};
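
With the values defined in onionPathConstants.ts further down (minimumGuardCount = 1, ONION_REQUEST_HOPS = 3), the thresholds above work out to 8 and 16. A small self-contained sketch of the arithmetic, standalone and not repository code:

// Recomputes the thresholds from the new constants file using the literals
// visible in this commit.
const minimumGuardCount = 1;
const ONION_REQUEST_HOPS = 3;

const minSnodePoolCount = minimumGuardCount * (ONION_REQUEST_HOPS + 1) * 2; // 1 * (3 + 1) * 2 = 8
const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2; // 16
const requiredSnodesForAgreement = 24;
const minSwarmSnodeCount = 3;

console.log({
  minSnodePoolCount, // 8
  minSnodePoolCountBeforeRefreshFromSnodes, // 16
  requiredSnodesForAgreement, // 24
  minSwarmSnodeCount, // 3
});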

@@ -13,16 +13,13 @@ import { APPLICATION_JSON } from '../../types/MIME';
 import { ERROR_CODE_NO_CONNECT } from '../apis/snode_api/SNodeAPI';
 import { Onions, snodeHttpsAgent } from '../apis/snode_api/onions';
 import { DURATION } from '../constants';
 import { UserUtils } from '../utils';
 import { allowOnlyOneAtATime } from '../utils/Promise';
 import { ed25519Str } from '../utils/String';
 import { SnodePool } from '../apis/snode_api/snodePool';
-
-export const desiredGuardCount = 2;
-export const minimumGuardCount = 1;
-export const ONION_REQUEST_HOPS = 3;
+import { SnodePoolConstants } from '../apis/snode_api/snodePoolConstants';
+import { desiredGuardCount, minimumGuardCount, ONION_REQUEST_HOPS } from './onionPathConstants';

 export function getOnionPathMinTimeout() {
   return DURATION.SECONDS;
@@ -345,7 +342,7 @@ export async function selectGuardNodes(): Promise<Array<Snode>> {
   const nodePool = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
   window.log.info(`selectGuardNodes snodePool length: ${nodePool.length}`);
-  if (nodePool.length < SnodePool.minSnodePoolCount) {
+  if (nodePool.length < SnodePoolConstants.minSnodePoolCount) {
     window?.log?.error(
       `Could not select guard nodes. Not enough nodes in the pool: ${nodePool.length}`
     );
@@ -451,7 +448,7 @@ async function buildNewOnionPathsWorker() {
       // get an up to date list of snodes from cache, from db, or from the a seed node.
       let allNodes = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
-      if (allNodes.length <= SnodePool.minSnodePoolCount) {
+      if (allNodes.length <= SnodePoolConstants.minSnodePoolCount) {
         throw new Error(`Cannot rebuild path as we do not have enough snodes: ${allNodes.length}`);
       }
@@ -465,7 +462,7 @@ async function buildNewOnionPathsWorker() {
         `SessionSnodeAPI::buildNewOnionPaths, snodePool length: ${allNodes.length}`
       );
       // get all snodes minus the selected guardNodes
-      if (allNodes.length <= SnodePool.minSnodePoolCount) {
+      if (allNodes.length <= SnodePoolConstants.minSnodePoolCount) {
         throw new Error('Too few nodes to build an onion path. Even after fetching from seed.');
       }
@@ -479,7 +476,7 @@ async function buildNewOnionPathsWorker() {
         return _.fill(Array(group.length), _.sample(group) as Snode);
       })
     );
-    if (oneNodeForEachSubnet24KeepingRatio.length <= SnodePool.minSnodePoolCount) {
+    if (oneNodeForEachSubnet24KeepingRatio.length <= SnodePoolConstants.minSnodePoolCount) {
       throw new Error(
         'Too few nodes "unique by ip" to build an onion path. Even after fetching from seed.'
       );

@@ -0,0 +1,3 @@
+export const desiredGuardCount = 2;
+export const minimumGuardCount = 1;
+export const ONION_REQUEST_HOPS = 3;
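
Moving these values into constants-only modules appears to be the core of the fix: snodePool.ts previously imported minimumGuardCount and ONION_REQUEST_HOPS from onionPath.ts while onionPath.ts imports SnodePool from snodePool.ts, a circular dependency that can leave exports undefined depending on load order (and likely why getMessageQueue is now imported from '../session/sending' in the first file). Below is a standalone sketch of that load-order hazard, using plain objects to stand in for two hypothetical modules; none of it is repository code.

// moduleA and moduleB stand in for two files that import each other.
// moduleB evaluates first and reads moduleA's export before it is assigned,
// which is the failure mode a constants-only leaf module avoids.
const moduleA: { value?: number } = {};
const moduleB: { value?: number } = {};

// "loading" moduleB: it depends on moduleA, which is still empty here
moduleB.value = (moduleA.value ?? 0) + 1;
// "loading" moduleA afterwards
moduleA.value = 1;

console.log(moduleB.value); // 1, not 2: the stale read caused by the cycle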

@@ -5,7 +5,7 @@ import Sinon, * as sinon from 'sinon';
 import { TestUtils } from '../../../test-utils';
 import { Onions } from '../../../../session/apis/snode_api/';
-import { minSnodePoolCount, SnodePool } from '../../../../session/apis/snode_api/snodePool';
+import { SnodePool } from '../../../../session/apis/snode_api/snodePool';
 import { SeedNodeAPI } from '../../../../session/apis/seed_node_api';
 import * as OnionPaths from '../../../../session/onions/onionPath';
@@ -15,6 +15,7 @@ import {
   stubData,
 } from '../../../test-utils/utils';
 import { Snode } from '../../../../data/types';
+import { SnodePoolConstants } from '../../../../session/apis/snode_api/snodePoolConstants';

 chai.use(chaiAsPromised as any);
 chai.should();
@@ -172,7 +173,7 @@ describe('GuardNodes', () => {
   });

   it('throws if we have to fetch from seed, fetch from seed but not have enough fetched snodes', async () => {
-    const invalidLength = minSnodePoolCount - 1;
+    const invalidLength = SnodePoolConstants.minSnodePoolCount - 1;
     const invalidSnodePool = fakeSnodePool.slice(0, invalidLength);
     stubData('getSnodePoolFromDb').resolves(invalidSnodePool);
     TestUtils.stubWindow('getSeedNodeList', () => [{ url: 'whatever' }]);
