}
diff --git a/ts/receiver/configMessage.ts b/ts/receiver/configMessage.ts
index cad2b845f..c00c3404d 100644
--- a/ts/receiver/configMessage.ts
+++ b/ts/receiver/configMessage.ts
@@ -582,11 +582,13 @@ async function handleLegacyGroupUpdate(latestEnvelopeTimestamp: number) {
toLeave.id
);
const toLeaveFromDb = ConvoHub.use().get(toLeave.id);
- // the wrapper told us that this group is not tracked, so even if we left/got kicked from it, remove it from the DB completely
- await ConvoHub.use().deleteLegacyGroup(toLeaveFromDb.id, {
- fromSyncMessage: true,
- sendLeaveMessage: false, // this comes from the wrapper, so we must have left/got kicked from that group already and our device already handled it.
- });
+ if (PubKey.is05Pubkey(toLeaveFromDb.id)) {
+ // the wrapper told us that this group is not tracked, so even if we left/got kicked from it, remove it from the DB completely
+ await ConvoHub.use().deleteLegacyGroup(toLeaveFromDb.id, {
+ fromSyncMessage: true,
+ sendLeaveMessage: false, // this comes from the wrapper, so we must have left/got kicked from that group already and our device already handled it.
+ });
+ }
}
for (let index = 0; index < legacyGroupsToJoinInDB.length; index++) {
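Note on the guard added above: legacy groups are addressed by 05-prefixed account ids, while new-style groups use an 03 prefix, so the PubKey.is05Pubkey check keeps deleteLegacyGroup from being called with an id it was never meant to handle. A minimal sketch of the kind of prefix check assumed here (illustrative only; the real PubKey.is05Pubkey may differ):

// Illustrative only: a Session account id is 33 bytes hex-encoded (66 chars) and
// legacy-group ids carry the 05 version prefix; 03-prefixed ids are new-style groups.
function looksLike05Pubkey(id: string): id is `05${string}` {
  return id.length === 66 && id.startsWith('05');
}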
diff --git a/ts/session/apis/snode_api/onions.ts b/ts/session/apis/snode_api/onions.ts
index 2fe7410d0..30243aa35 100644
--- a/ts/session/apis/snode_api/onions.ts
+++ b/ts/session/apis/snode_api/onions.ts
@@ -1135,10 +1135,7 @@ function getPathString(pathObjArr: Array<{ ip: string; port: number }>): string
return pathObjArr.map(node => `${node.ip}:${node.port}`).join(', ');
}
-/**
- * If the fetch throws a retryable error we retry this call with a new path at most 3 times. If another error happens, we return it. If we have a result we just return it.
- */
-async function lokiOnionFetchWithRetries({
+async function lokiOnionFetchNoRetries({
targetNode,
associatedWith,
body,
@@ -1152,33 +1149,17 @@ async function lokiOnionFetchWithRetries({
allow401s: boolean;
}): Promise<SnodeResponse | undefined> {
try {
- const retriedResult = await pRetry(
- async () => {
- // Get a path excluding `targetNode`:
- const path = await OnionPaths.getOnionPath({ toExclude: targetNode });
- const result = await sendOnionRequestSnodeDestNoRetries(
- path,
- targetNode,
- headers,
- body,
- allow401s,
- associatedWith
- );
- return result;
- },
- {
- retries: 3,
- factor: 1,
- minTimeout: 100,
- onFailedAttempt: e => {
- window?.log?.warn(
- `onionFetchRetryable attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`
- );
- },
- }
+ // Get a path excluding `targetNode`:
+ const path = await OnionPaths.getOnionPath({ toExclude: targetNode });
+ const result = await sendOnionRequestSnodeDestNoRetries(
+ path,
+ targetNode,
+ headers,
+ body,
+ allow401s,
+ associatedWith
);
-
- return retriedResult as SnodeResponse | undefined;
+ return result as SnodeResponse | undefined;
} catch (e) {
window?.log?.warn('onionFetchRetryable failed ', e.message);
if (e?.errno === 'ENETUNREACH') {
@@ -1197,7 +1178,7 @@ export const Onions = {
sendOnionRequestHandlingSnodeEjectNoRetries,
incrementBadSnodeCountOrDrop,
decodeOnionResult,
- lokiOnionFetchWithRetries,
+ lokiOnionFetchNoRetries,
getPathString,
sendOnionRequestSnodeDestNoRetries,
processOnionResponse,
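The rename above reflects that lokiOnionFetchNoRetries now performs a single attempt: the pRetry wrapper, the fresh path per attempt, and the per-attempt warning log are gone, and errors propagate to the caller. If a call site still wants the old behaviour, it could wrap the new function itself; a hedged sketch (the wrapper below is an assumption for illustration, not part of this patch):

import pRetry from 'p-retry';

// Illustrative only: re-create the previous 3-retry policy outside of onions.ts.
async function lokiOnionFetchWithExternalRetries(
  args: Parameters<typeof Onions.lokiOnionFetchNoRetries>[0]
) {
  return pRetry(async () => Onions.lokiOnionFetchNoRetries(args), {
    retries: 3,
    factor: 1,
    minTimeout: 100,
    onFailedAttempt: e => {
      window?.log?.warn(
        `onion fetch attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`
      );
    },
  });
}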
diff --git a/ts/session/apis/snode_api/sessionRpc.ts b/ts/session/apis/snode_api/sessionRpc.ts
index 82c6818b8..56d1b5746 100644
--- a/ts/session/apis/snode_api/sessionRpc.ts
+++ b/ts/session/apis/snode_api/sessionRpc.ts
@@ -54,7 +54,7 @@ async function doRequestNoRetries({
? true
: window.sessionFeatureFlags?.useOnionRequests;
if (useOnionRequests && targetNode) {
- const fetchResult = await Onions.lokiOnionFetchWithRetries({
+ const fetchResult = await Onions.lokiOnionFetchNoRetries({
targetNode,
body: fetchOptions.body,
headers: fetchOptions.headers,
diff --git a/ts/session/utils/Toast.tsx b/ts/session/utils/Toast.tsx
index 6c6225ff5..4793f9363 100644
--- a/ts/session/utils/Toast.tsx
+++ b/ts/session/utils/Toast.tsx
@@ -11,11 +11,18 @@ export function pushToastError(id: string, description: string) {
});
}
-export function pushToastWarning(id: string, description: string) {
-  toast.warning(<SessionToast description={description} type={SessionToastType.Warning} />, {
-    toastId: id,
-    updateId: id,
-  });
+export function pushToastWarning(id: string, description: string, onToastClick?: () => void) {
+  toast.warning(
+    <SessionToast
+      description={description}
+      type={SessionToastType.Warning}
+      onToastClick={onToastClick}
+    />,
+    {
+      toastId: id,
+      updateId: id,
+    }
+  );
}
export function pushToastInfo(
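The new third parameter is optional, so existing pushToastWarning call sites compile unchanged; only callers that pass a handler get a clickable toast. Illustrative usage (ids and strings below are made up):

// Unchanged behaviour: no click handler.
pushToastWarning('example-warning', 'Something needs your attention');

// New behaviour: clicking the toast runs the callback.
pushToastWarning('example-warning', 'Something needs your attention', () => {
  window.log.info('warning toast clicked');
});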
diff --git a/ts/session/utils/job_runners/jobs/GroupInviteJob.ts b/ts/session/utils/job_runners/jobs/GroupInviteJob.ts
index b24687c93..9fb8aba10 100644
--- a/ts/session/utils/job_runners/jobs/GroupInviteJob.ts
+++ b/ts/session/utils/job_runners/jobs/GroupInviteJob.ts
@@ -18,6 +18,8 @@ import {
PersistedJob,
RunJobResult,
} from '../PersistedJob';
+import { LibSessionUtil } from '../../libsession/libsession_utils';
+import { showUpdateGroupMembersByConvoId } from '../../../../interactions/conversationInteractions';
const defaultMsBetweenRetries = 10000;
const defaultMaxAttemps = 1;
@@ -51,15 +53,9 @@ async function addJob({ groupPk, member }: JobExtraArgs) {
nextAttemptTimestamp: Date.now(),
});
window.log.debug(`addGroupInviteJob: adding group invite for ${groupPk}:${member} `);
- try {
- updateFailedStateForMember(groupPk, member, false);
- // we have to reset the error state so that the UI shows "sending" even if the last try failed.
- await MetaGroupWrapperActions.memberSetInvited(groupPk, member, false);
- } catch (e) {
- window.log.warn('GroupInviteJob memberSetInvited (before) failed with', e.message);
- }
window?.inboxStore?.dispatch(groupInfoActions.refreshGroupDetailsFromWrapper({ groupPk }));
+ await LibSessionUtil.saveDumpsToDb(groupPk);
await runners.groupInviteJobRunner.addJob(groupInviteJob);
@@ -71,21 +67,27 @@ async function addJob({ groupPk, member }: JobExtraArgs) {
function displayFailedInvitesForGroup(groupPk: GroupPubkeyType) {
const thisGroupFailures = invitesFailed.get(groupPk);
+
if (!thisGroupFailures || thisGroupFailures.failedMembers.length === 0) {
return;
}
+ const onToastClick = () => {
+ void showUpdateGroupMembersByConvoId(groupPk);
+ };
const count = thisGroupFailures.failedMembers.length;
switch (count) {
case 1:
ToastUtils.pushToastWarning(
`invite-failed${groupPk}`,
- window.i18n('groupInviteFailedOne', [...thisGroupFailures.failedMembers, groupPk])
+ window.i18n('groupInviteFailedOne', [...thisGroupFailures.failedMembers, groupPk]),
+ onToastClick
);
break;
case 2:
ToastUtils.pushToastWarning(
`invite-failed${groupPk}`,
- window.i18n('groupInviteFailedTwo', [...thisGroupFailures.failedMembers, groupPk])
+ window.i18n('groupInviteFailedTwo', [...thisGroupFailures.failedMembers, groupPk]),
+ onToastClick
);
break;
default:
@@ -95,7 +97,8 @@ function displayFailedInvitesForGroup(groupPk: GroupPubkeyType) {
thisGroupFailures.failedMembers[0],
`${thisGroupFailures.failedMembers.length - 1}`,
groupPk,
- ])
+ ]),
+ onToastClick
);
}
// toast was displayed empty the list
@@ -169,7 +172,11 @@ class GroupInviteJob extends PersistedJob {
window.log.warn(
`${jobType} with groupPk:"${groupPk}" member: ${member} id:"${identifier}" failed with ${e.message}`
);
+ failed = true;
} finally {
+ window.log.info(
+ `${jobType} with groupPk:"${groupPk}" member: ${member} id:"${identifier}" finished. failed:${failed}`
+ );
try {
await MetaGroupWrapperActions.memberSetInvited(groupPk, member, failed);
} catch (e) {
@@ -177,7 +184,11 @@ class GroupInviteJob extends PersistedJob {
}
updateFailedStateForMember(groupPk, member, failed);
+ window?.inboxStore?.dispatch(
+ groupInfoActions.setInvitePending({ groupPk, pubkey: member, sending: false })
+ );
window?.inboxStore?.dispatch(groupInfoActions.refreshGroupDetailsFromWrapper({ groupPk }));
+ await LibSessionUtil.saveDumpsToDb(groupPk);
}
// return true so this job is marked as a success and we don't need to retry it
return RunJobResult.Success;
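The reworked finally block runs on both outcomes: it logs the result, writes the invited/failed flag back to the wrapper, clears the per-member "sending" state in redux and persists the group dump. A self-contained sketch of that control flow, with stand-in helpers rather than the real job internals:

// Illustrative only: sendInvite and recordInviteOutcome are stand-ins, not real helpers.
async function runInviteOnce(
  sendInvite: () => Promise<void>,
  recordInviteOutcome: (failed: boolean) => Promise<void>
): Promise<void> {
  let failed = false;
  try {
    await sendInvite(); // may throw
  } catch (e) {
    failed = true; // remembered so the finally block records the right state
  } finally {
    // wrapper flag, redux "sending" reset and saveDumpsToDb all happen here, success or not
    await recordInviteOutcome(failed);
  }
}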
diff --git a/ts/session/utils/job_runners/jobs/GroupSyncJob.ts b/ts/session/utils/job_runners/jobs/GroupSyncJob.ts
index e8066ad94..5dd00b59d 100644
--- a/ts/session/utils/job_runners/jobs/GroupSyncJob.ts
+++ b/ts/session/utils/job_runners/jobs/GroupSyncJob.ts
@@ -13,6 +13,8 @@ import {
DeleteAllFromGroupMsgNodeSubRequest,
StoreGroupKeysSubRequest,
StoreGroupMessageSubRequest,
+ SubaccountRevokeSubRequest,
+ SubaccountUnrevokeSubRequest,
} from '../../../apis/snode_api/SnodeRequestTypes';
import { DeleteGroupHashesFactory } from '../../../apis/snode_api/factories/DeleteGroupHashesRequestFactory';
import { StoreGroupRequestFactory } from '../../../apis/snode_api/factories/StoreGroupRequestFactory';
@@ -138,6 +140,13 @@ async function pushChangesToGroupSwarmIfNeeded({
deleteAllMessagesSubRequest,
]);
+ const extraRequestWithExpectedResults = extraRequests.filter(
+ m =>
+ m instanceof SubaccountRevokeSubRequest ||
+ m instanceof SubaccountUnrevokeSubRequest ||
+ m instanceof DeleteAllFromGroupMsgNodeSubRequest
+ );
+
const result = await MessageSender.sendEncryptedDataToSnode({
// Note: this is on purpose that supplementalKeysSubRequest is before pendingConfigRequests
// as this is to avoid a race condition where a device is polling right
@@ -155,11 +164,8 @@ async function pushChangesToGroupSwarmIfNeeded({
const expectedReplyLength =
pendingConfigRequests.length + // each of those are sent as a subrequest
(supplementalKeysSubRequest ? 1 : 0) + // we are sending all the supplemental keys as a single subrequest
- (deleteHashesSubRequest ? 1 : 0) + // we are sending all hashes changes as a single subrequest
- (revokeSubRequest ? 1 : 0) + // we are sending all revoke updates as a single subrequest
- (unrevokeSubRequest ? 1 : 0) + // we are sending all revoke updates as a single subrequest
- (deleteAllMessagesSubRequest ? 1 : 0) + // a delete_all sub request is a single subrequest
- (extraStoreRequests ? 1 : 0); // each of those are sent as a subrequest
+ (extraStoreRequests ? 1 : 0) + // each of those are sent as a subrequest
+    extraRequestWithExpectedResults.length; // each extra request is sent as a subrequest, but only these are expected to return a result
// we do a sequence call here. If we do not have the right expected number of results, consider it a failure
if (!isArray(result) || result.length !== expectedReplyLength) {
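Reply counting above now relies on the instanceof filter: only revoke, unrevoke and delete-all sub-requests contribute to the expected sequence length, alongside the config, supplemental-keys and store requests. A hedged sketch of keeping that knowledge in one helper so the length check does not drift when new request types are added (helper name is an assumption based on this hunk; the three classes are the ones imported from SnodeRequestTypes above):

// Illustrative only: count the extra sub-requests that are expected to produce a result.
function countExtraRepliesExpected(extraRequests: Array<unknown>): number {
  return extraRequests.filter(
    r =>
      r instanceof SubaccountRevokeSubRequest ||
      r instanceof SubaccountUnrevokeSubRequest ||
      r instanceof DeleteAllFromGroupMsgNodeSubRequest
  ).length;
}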
diff --git a/ts/session/utils/libsession/libsession_utils.ts b/ts/session/utils/libsession/libsession_utils.ts
index 944f875e5..0712c1ddb 100644
--- a/ts/session/utils/libsession/libsession_utils.ts
+++ b/ts/session/utils/libsession/libsession_utils.ts
@@ -351,6 +351,8 @@ async function saveDumpsToDb(pubkey: PubkeyType | GroupPubkeyType) {
const metaNeedsDump = await MetaGroupWrapperActions.needsDump(pubkey);
// save the concatenated dumps as a single entry in the DB if any of the dumps had a need for dump
if (metaNeedsDump) {
+ window.log.debug(`About to make and save dumps for metagroup ${ed25519Str(pubkey)}`);
+
const dump = await MetaGroupWrapperActions.metaDump(pubkey);
await ConfigDumpData.saveConfigDump({
data: dump,
@@ -358,7 +360,7 @@ async function saveDumpsToDb(pubkey: PubkeyType | GroupPubkeyType) {
variant: `MetaGroupConfig-${pubkey}`,
});
- window.log.debug(`Saved dumps for metagroup ${ed25519Str(pubkey)}`);
+ window.log.info(`Saved dumps for metagroup ${ed25519Str(pubkey)}`);
} else {
window.log.debug(`No need to update local dumps for metagroup ${ed25519Str(pubkey)}`);
}