`Something is wrong with the firstExpirationResult: ${JSON.stringify(
JSON.stringify(firstExpirationResult)
)}`
);
}
      if (!expiry || !messageHash) {
        throw new Error(
          `Something is wrong with the firstExpirationResult: ${JSON.stringify(
            JSON.stringify(firstExpirationResult)
          )}`
        );
      }
// window.log.debug(
// `WIP: [expireOnNodes] Success!\nHere are the results from one of the snodes.\nmessageHash: ${messageHash} \nexpiry: ${expiry} \nexpires at: ${new Date(
      return expiry;
    } catch (e) {
      window?.log?.warn('WIP: [expireOnNodes] Failed to parse "swarm" result: ', e);
    }
    return null;
  } catch (err) {
    window?.log?.warn(
      'WIP: [expireOnNodes] - send error:',
      err.message || err,
      `destination ${targetNode.ip}:${targetNode.port}`
    );
    // NOTE batch requests have their own retry logic which includes abort errors that will break our retry logic so we need to catch them and throw regular errors
    if (err instanceof pRetry.AbortError) {
      // Re-throw as a plain Error so the caller's retry loop is not short-circuited.
      throw Error(err.message);
    }
    throw err;
  }
}
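// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of this diff): a minimal picture of why the
// NOTE above converts pRetry.AbortError back into a plain Error. pRetry stops
// retrying as soon as the wrapped function throws an AbortError, so letting the
// batch request's own AbortError escape would short-circuit the outer retry loop.
// The wrapper name `retryExpireOnTarget`, the `send` callback and the retry
// options below are assumptions made up for this example, not names from the codebase.
import pRetry from 'p-retry';

async function retryExpireOnTarget(
  // e.g. () => expireOnNodes(targetNode, expireParams)
  send: () => Promise<number | null>
): Promise<number | null> {
  return pRetry(
    async () => {
      try {
        return await send();
      } catch (err) {
        if (err instanceof pRetry.AbortError) {
          // Unwrap: a plain Error is treated as a retryable failure by pRetry.
          throw new Error(err.message);
        }
        throw err;
      }
    },
    { retries: 3, minTimeout: 500 }
  );
}
// ---------------------------------------------------------------------------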
@@ -299,38 +294,19 @@ export async function expireMessageOnSnode(
  let snode: Snode | undefined;
  await pRetry(
    async () => {
      const swarm = await getSwarmFor(ourPubKey);
      snode = sample(swarm);
      if (!snode) {
        throw new EmptySwarmError(ourPubKey, 'Ran out of swarm nodes to query'