Merge pull request #1000 from msgmaxim/onion-requests

Client-side implementation for onion requests
Maxim Shishmarev 5 years ago committed by GitHub
commit a91d925384

@ -98,6 +98,8 @@ module.exports = {
getAllSessions,
getSwarmNodesByPubkey,
getGuardNodes,
updateGuardNodes,
getConversationCount,
saveConversation,
@ -807,6 +809,7 @@ async function updateSchema(instance) {
const LOKI_SCHEMA_VERSIONS = [
updateToLokiSchemaVersion1,
updateToLokiSchemaVersion2,
updateToLokiSchemaVersion3,
];
async function updateToLokiSchemaVersion1(currentVersion, instance) {
@ -975,6 +978,33 @@ async function updateToLokiSchemaVersion2(currentVersion, instance) {
console.log('updateToLokiSchemaVersion2: success!');
}
async function updateToLokiSchemaVersion3(currentVersion, instance) {
if (currentVersion >= 3) {
return;
}
await instance.run(
`CREATE TABLE ${GUARD_NODE_TABLE}(
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
ed25519PubKey VARCHAR(64)
);`
);
console.log('updateToLokiSchemaVersion3: starting...');
await instance.run('BEGIN TRANSACTION;');
await instance.run(
`INSERT INTO loki_schema (
version
) values (
3
);`
);
await instance.run('COMMIT TRANSACTION;');
console.log('updateToLokiSchemaVersion3: success!');
}
async function updateLokiSchema(instance) {
const result = await instance.get(
"SELECT name FROM sqlite_master WHERE type = 'table' AND name='loki_schema';"
@ -1400,6 +1430,9 @@ async function removeAllSignedPreKeys() {
}
const PAIRING_AUTHORISATIONS_TABLE = 'pairingAuthorisations';
const GUARD_NODE_TABLE = 'guardNodes';
async function getAuthorisationForSecondaryPubKey(pubKey, options) {
const granted = options && options.granted;
let filter = '';
@ -1470,6 +1503,37 @@ async function getSecondaryDevicesFor(primaryDevicePubKey) {
return map(authorisations, row => row.secondaryDevicePubKey);
}
async function getGuardNodes() {
const nodes = await db.all(`SELECT ed25519PubKey FROM ${GUARD_NODE_TABLE};`);
if (!nodes) {
return null;
}
return nodes;
}
async function updateGuardNodes(nodes) {
await db.run('BEGIN TRANSACTION;');
await db.run(`DELETE FROM ${GUARD_NODE_TABLE}`);
await Promise.all(
nodes.map(edkey =>
db.run(
`INSERT INTO ${GUARD_NODE_TABLE} (
ed25519PubKey
) values ($ed25519PubKey)`,
{
$ed25519PubKey: edkey,
}
)
)
);
await db.run('END TRANSACTION;');
}
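Together, getGuardNodes and updateGuardNodes form a simple full-replace persistence layer for guard node keys (exposed to the renderer as window.libloki.storage.getGuardNodes / updateGuardNodes further down). A minimal usage sketch, with placeholder keys:

// Sketch: persist the current guard set, then read it back.
// The keys are placeholders, not real ed25519 snode keys.
await updateGuardNodes(['guardKeyA', 'guardKeyB', 'guardKeyC']);
const rows = await getGuardNodes(); // -> [{ ed25519PubKey: 'guardKeyA' }, ...]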
async function getPrimaryDeviceFor(secondaryDevicePubKey) {
const row = await db.get(
`SELECT primaryDevicePubKey FROM ${PAIRING_AUTHORISATIONS_TABLE} WHERE secondaryDevicePubKey = $secondaryDevicePubKey AND isGranted = 1;`,

@ -1440,6 +1440,9 @@
async function connect(firstRun) {
window.log.info('connect');
// Initialize paths for onion requests
await window.lokiSnodeAPI.buildNewOnionPaths();
// Bootstrap our online/offline detection, only the first time we connect
if (connectCount === 0 && navigator.onLine) {
window.addEventListener('offline', onOffline);

@ -101,6 +101,9 @@ module.exports = {
getPrimaryDeviceFor,
getPairedDevicesFor,
getGuardNodes,
updateGuardNodes,
createOrUpdateItem,
getItemById,
getAllItems,
@ -117,6 +120,7 @@ module.exports = {
removeAllSessions,
getAllSessions,
// Doesn't look like this is used at all
getSwarmNodesByPubkey,
getConversationCount,
@ -647,6 +651,14 @@ function getSecondaryDevicesFor(primaryDevicePubKey) {
return channels.getSecondaryDevicesFor(primaryDevicePubKey);
}
function getGuardNodes() {
return channels.getGuardNodes();
}
function updateGuardNodes(nodes) {
return channels.updateGuardNodes(nodes);
}
function getPrimaryDeviceFor(secondaryDevicePubKey) {
return channels.getPrimaryDeviceFor(secondaryDevicePubKey);
}

@ -7,7 +7,6 @@ const { lokiRpc } = require('./loki_rpc');
const DEFAULT_CONNECTIONS = 3;
const MAX_ACCEPTABLE_FAILURES = 1;
const LOKI_LONGPOLL_HEADER = 'X-Loki-Long-Poll';
function sleepFor(time) {
return new Promise(resolve => {
@ -283,8 +282,6 @@ class LokiMessageAPI {
!stopPollingResult &&
successiveFailures < MAX_ACCEPTABLE_FAILURES
) {
await sleepFor(successiveFailures * 1000);
// TODO: Revert back to using snode address instead of IP
try {
// in general, I think we want exceptions to bubble up
@ -337,6 +334,9 @@ class LokiMessageAPI {
}
successiveFailures += 1;
}
// Always wait a bit as we are no longer long-polling
await sleepFor(Math.max(successiveFailures, 2) * 1000);
}
if (successiveFailures >= MAX_ACCEPTABLE_FAILURES) {
const remainingSwarmSnodes = await lokiSnodeAPI.unreachableNode(
@ -374,9 +374,6 @@ class LokiMessageAPI {
const options = {
timeout: 40000,
ourPubKey: this.ourKey,
headers: {
[LOKI_LONGPOLL_HEADER]: true,
},
};
// let exceptions bubble up

@ -1,5 +1,5 @@
/* global log, libloki, textsecure, getStoragePubKey, lokiSnodeAPI, StringView,
libsignal, window, TextDecoder, TextEncoder, dcodeIO, process */
libsignal, window, TextDecoder, TextEncoder, dcodeIO, process, crypto */
const nodeFetch = require('node-fetch');
const https = require('https');
@ -12,6 +12,9 @@ const snodeHttpsAgent = new https.Agent({
const LOKI_EPHEMKEY_HEADER = 'X-Loki-EphemKey';
const endpointBase = '/storage_rpc/v1';
// Request index for debugging
let onionReqIdx = 0;
const decryptResponse = async (response, address) => {
let plaintext = false;
try {
@ -31,8 +34,210 @@ const decryptResponse = async (response, address) => {
const timeoutDelay = ms => new Promise(resolve => setTimeout(resolve, ms));
const encryptForNode = async (node, payload) => {
const textEncoder = new TextEncoder();
const plaintext = textEncoder.encode(payload);
const ephemeral = libloki.crypto.generateEphemeralKeyPair();
const snPubkey = StringView.hexToArrayBuffer(node.pubkey_x25519);
const ephemeralSecret = libsignal.Curve.calculateAgreement(
snPubkey,
ephemeral.privKey
);
const salt = window.Signal.Crypto.bytesFromString('LOKI');
const key = await crypto.subtle.importKey(
'raw',
salt,
{ name: 'HMAC', hash: { name: 'SHA-256' } },
false,
['sign']
);
const symmetricKey = await crypto.subtle.sign(
{ name: 'HMAC', hash: 'SHA-256' },
key,
ephemeralSecret
);
const ciphertext = await window.libloki.crypto.EncryptGCM(
symmetricKey,
plaintext
);
return { ciphertext, symmetricKey, ephemeral_key: ephemeral.pubKey };
};
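encryptForNode derives the per-hop symmetric key from an x25519 agreement keyed through HMAC-SHA256 with the constant salt 'LOKI', then seals the payload with AES-GCM. The receiving service node performs the mirror-image derivation in its own implementation; the sketch below expresses that with the same client-side primitives purely for illustration (the function and parameter names are hypothetical):

// Conceptual sketch of the receiver's side of encryptForNode:
// recompute the shared secret from the node's static x25519 private key and
// the ephemeral_key it was handed, derive the same AES key, then decrypt.
const decryptLayerSketch = async (nodeX25519PrivKey, ephemeralPubKey, ciphertext) => {
  const sharedSecret = libsignal.Curve.calculateAgreement(
    ephemeralPubKey,
    nodeX25519PrivKey
  );
  const salt = window.Signal.Crypto.bytesFromString('LOKI');
  const hmacKey = await crypto.subtle.importKey(
    'raw',
    salt,
    { name: 'HMAC', hash: { name: 'SHA-256' } },
    false,
    ['sign']
  );
  const symmetricKey = await crypto.subtle.sign(
    { name: 'HMAC', hash: 'SHA-256' },
    hmacKey,
    sharedSecret
  );
  // `ciphertext` is assumed to be raw bytes (nonce || ciphertext || tag)
  return window.libloki.crypto.DecryptGCM(symmetricKey, ciphertext);
};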
// Returns the actual ciphertext, symmetric key that will be used
// for decryption, and an ephemeral_key to send to the next hop
const encryptForDestination = async (node, payload) => {
// Do we still need "headers"?
const reqStr = JSON.stringify({ body: payload, headers: '' });
return encryptForNode(node, reqStr);
};
// `ctx` holds info used by `node` to relay further
const encryptForRelay = async (node, nextNode, ctx) => {
const payload = ctx.ciphertext;
const reqJson = {
ciphertext: dcodeIO.ByteBuffer.wrap(payload).toString('base64'),
ephemeral_key: StringView.arrayBufferToHex(ctx.ephemeral_key),
destination: nextNode.pubkey_ed25519,
};
const reqStr = JSON.stringify(reqJson);
return encryptForNode(node, reqStr);
};
const BAD_PATH = 'bad_path';
// May return BAD_PATH, indicating that we should try a new path
const sendOnionRequest = async (reqIdx, nodePath, targetNode, plaintext) => {
log.info('Sending an onion request');
const ctx1 = await encryptForDestination(targetNode, plaintext);
const ctx2 = await encryptForRelay(nodePath[2], targetNode, ctx1);
const ctx3 = await encryptForRelay(nodePath[1], nodePath[2], ctx2);
const ctx4 = await encryptForRelay(nodePath[0], nodePath[1], ctx3);
const ciphertextBase64 = dcodeIO.ByteBuffer.wrap(ctx4.ciphertext).toString(
'base64'
);
const payload = {
ciphertext: ciphertextBase64,
ephemeral_key: StringView.arrayBufferToHex(ctx4.ephemeral_key),
};
const fetchOptions = {
method: 'POST',
body: JSON.stringify(payload),
};
const url = `https://${nodePath[0].ip}:${nodePath[0].port}/onion_req`;
// we only proxy to snodes...
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
const response = await nodeFetch(url, fetchOptions);
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '1';
return processOnionResponse(reqIdx, response, ctx1.symmetricKey, true);
};
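The ctx1..ctx4 chain hard-codes a 3-hop path. The same wrapping can be written as a loop, which makes the layering order explicit (innermost layer for the destination, outermost for the guard node). A sketch, not a drop-in replacement:

// Equivalent formulation of the layered encryption above for a 3-hop path.
const buildOnionCtxSketch = async (nodePath, targetNode, plaintext) => {
  // innermost layer: sealed for the destination snode
  let ctx = await encryptForDestination(targetNode, plaintext);
  let nextNode = targetNode;
  // wrap outwards: last relay first, guard node (nodePath[0]) last
  for (let i = nodePath.length - 1; i >= 0; i -= 1) {
    // eslint-disable-next-line no-await-in-loop
    ctx = await encryptForRelay(nodePath[i], nextNode, ctx);
    nextNode = nodePath[i];
  }
  return ctx; // corresponds to ctx4 above; its ciphertext goes to nodePath[0]
};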
// Process a response as it arrives from `nodeFetch`, handling
// http errors and attempting to decrypt the body with `sharedKey`
const processOnionResponse = async (reqIdx, response, sharedKey, useAesGcm) => {
log.info(`(${reqIdx}) [path] processing onion response`);
// detect SNode is not ready (not in swarm; not done syncing)
if (response.status === 503) {
log.warn('Got 503: snode not ready');
return BAD_PATH;
}
if (response.status === 504) {
log.warn('Got 504: Gateway timeout');
return BAD_PATH;
}
if (response.status === 404) {
// Why would we get this error on testnet?
    log.warn('Got 404: not found');
return BAD_PATH;
}
if (response.status !== 200) {
log.warn(
'lokiRpc sendToProxy fetch unhandled error code:',
response.status
);
return false;
}
const ciphertext = await response.text();
if (!ciphertext) {
    log.warn('[path]: Target node returned empty ciphertext');
return false;
}
let plaintext;
let ciphertextBuffer;
try {
ciphertextBuffer = dcodeIO.ByteBuffer.wrap(
ciphertext,
'base64'
).toArrayBuffer();
const decryptFn = useAesGcm
? window.libloki.crypto.DecryptGCM
: window.libloki.crypto.DHDecrypt;
const plaintextBuffer = await decryptFn(sharedKey, ciphertextBuffer);
const textDecoder = new TextDecoder();
plaintext = textDecoder.decode(plaintextBuffer);
} catch (e) {
log.error(`(${reqIdx}) lokiRpc sendToProxy decode error`);
if (ciphertextBuffer) {
log.error('ciphertextBuffer', ciphertextBuffer);
}
return false;
}
try {
const jsonRes = JSON.parse(plaintext);
// emulate nodeFetch response...
jsonRes.json = () => {
try {
const res = JSON.parse(jsonRes.body);
return res;
} catch (e) {
log.error(
`(${reqIdx}) lokiRpc sendToProxy parse error json: `,
jsonRes.body
);
}
return false;
};
return jsonRes;
} catch (e) {
log.error(
'lokiRpc sendToProxy parse error',
e.code,
e.message,
`json:`,
plaintext
);
return false;
}
};
const sendToProxy = async (options = {}, targetNode, retryNumber = 0) => {
const randSnode = await lokiSnodeAPI.getRandomSnodeAddress();
const _ = window.Lodash;
const snodePool = await lokiSnodeAPI.getRandomSnodePool();
if (snodePool.length < 2) {
log.error(
'Not enough service nodes for a proxy request, only have: ',
snodePool.length
);
return false;
}
// Making sure the proxy node is not the same as the target node:
const snodePoolSafe = _.without(
snodePool,
_.find(snodePool, { pubkey_ed25519: targetNode.pubkey_ed25519 })
);
const randSnode = window.Lodash.sample(snodePoolSafe);
// Don't allow arbitrary URLs, only snodes and loki servers
const url = `https://${randSnode.ip}:${randSnode.port}/proxy`;
@ -262,6 +467,43 @@ const lokiFetch = async (url, options = {}, targetNode = null) => {
};
try {
// Absence of targetNode indicates that we want a direct connection
// (e.g. to connect to a seed node for the first time)
if (window.lokiFeatureFlags.useOnionRequests && targetNode) {
// Loop until the result is not BAD_PATH
// eslint-disable-next-line no-constant-condition
while (true) {
// Get a path excluding `targetNode`:
// eslint-disable-next-line no-await-in-loop
const path = await lokiSnodeAPI.getOnionPath(targetNode);
const thisIdx = onionReqIdx;
onionReqIdx += 1;
log.info(
`(${thisIdx}) using path ${path[0].ip}:${path[0].port} -> ${
path[1].ip
}:${path[1].port} -> ${path[2].ip}:${path[2].port} => ${
targetNode.ip
}:${targetNode.port}`
);
// eslint-disable-next-line no-await-in-loop
const result = await sendOnionRequest(
thisIdx,
path,
targetNode,
fetchOptions.body
);
if (result === BAD_PATH) {
log.error('[path] Error on the path');
lokiSnodeAPI.markPathAsBad(path);
} else {
return result ? result.json() : false;
}
}
}
if (window.lokiFeatureFlags.useSnodeProxy && targetNode) {
const result = await sendToProxy(fetchOptions, targetNode);
// if not result, maybe we should throw??
@ -332,6 +574,7 @@ const lokiFetch = async (url, options = {}, targetNode = null) => {
};
// Wrapper for a JSON RPC request
// Annoyingly, this is used for Lokid requests too
const lokiRpc = (
address,
port,

@ -1,11 +1,11 @@
/* eslint-disable class-methods-use-this */
/* global window, ConversationController, _, log, clearTimeout */
/* global window, textsecure, ConversationController, _, log, clearTimeout, process */
const is = require('@sindresorhus/is');
const { lokiRpc } = require('./loki_rpc');
const nodeFetch = require('node-fetch');
const RANDOM_SNODES_TO_USE_FOR_PUBKEY_SWARM = 3;
const RANDOM_SNODES_POOL_SIZE = 1024;
const SEED_NODE_RETRIES = 3;
class LokiSnodeAPI {
@ -18,6 +18,226 @@ class LokiSnodeAPI {
this.randomSnodePool = [];
this.swarmsPendingReplenish = {};
this.refreshRandomPoolPromise = false;
this.onionPaths = [];
this.guardNodes = [];
}
async getRandomSnodePool() {
if (this.randomSnodePool.length === 0) {
await this.refreshRandomPool();
}
return this.randomSnodePool;
}
async testGuardNode(snode) {
log.info('Testing a candidate guard node ', snode);
// Send a post request and make sure it is OK
const endpoint = '/storage_rpc/v1';
const url = `https://${snode.ip}:${snode.port}${endpoint}`;
const ourPK = textsecure.storage.user.getNumber();
const pubKey = window.getStoragePubKey(ourPK); // truncate if testnet
const method = 'get_snodes_for_pubkey';
const params = { pubKey };
const body = {
jsonrpc: '2.0',
id: '0',
method,
params,
};
const fetchOptions = {
method: 'POST',
body: JSON.stringify(body),
headers: { 'Content-Type': 'application/json' },
timeout: 10000, // 10s, we want a smaller timeout for testing
};
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
let response;
try {
response = await nodeFetch(url, fetchOptions);
} catch (e) {
if (e.type === 'request-timeout') {
log.warn(`test timeout for node,`, snode);
}
return false;
} finally {
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '1';
}
if (!response.ok) {
log.info(`Node failed the guard test:`, snode);
}
return response.ok;
}
async selectGuardNodes() {
const _ = window.Lodash;
const nodePool = await this.getRandomSnodePool();
if (nodePool.length === 0) {
      log.error(`Could not select guard nodes: node pool is empty`);
return [];
}
const shuffled = _.shuffle(nodePool);
let guardNodes = [];
const DESIRED_GUARD_COUNT = 3;
// The use of await inside while is intentional:
// we only want to repeat if the await fails
    // eslint-disable-next-line no-await-in-loop
    while (guardNodes.length < DESIRED_GUARD_COUNT) {
      if (shuffled.length < DESIRED_GUARD_COUNT) {
        log.error(`Not enough nodes in the pool`);
break;
}
const candidateNodes = shuffled.splice(0, DESIRED_GUARD_COUNT);
// Test all three nodes at once
// eslint-disable-next-line no-await-in-loop
const idxOk = await Promise.all(
candidateNodes.map(n => this.testGuardNode(n))
);
const goodNodes = _.zip(idxOk, candidateNodes)
.filter(x => x[0])
.map(x => x[1]);
guardNodes = _.concat(guardNodes, goodNodes);
}
if (guardNodes.length < DESIRED_GUARD_COUNT) {
log.error(
`COULD NOT get enough guard nodes, only have: ${guardNodes.length}`
);
}
log.info('new guard nodes: ', guardNodes);
const edKeys = guardNodes.map(n => n.pubkey_ed25519);
await window.libloki.storage.updateGuardNodes(edKeys);
return guardNodes;
}
async getOnionPath(toExclude = null) {
const _ = window.Lodash;
const goodPaths = this.onionPaths.filter(x => !x.bad);
if (goodPaths.length < 2) {
log.error(
`Must have at least 2 good onion paths, actual: ${goodPaths.length}`
);
await this.buildNewOnionPaths();
}
const paths = _.shuffle(goodPaths);
    if (!toExclude) {
      return paths[0].path;
    }
    // Select a path that doesn't contain `toExclude`
    const otherPaths = paths.filter(
      p =>
        !_.some(p.path, n => n.pubkey_ed25519 === toExclude.pubkey_ed25519)
    );
    if (otherPaths.length === 0) {
      // This should never happen!
      throw new Error('No onion paths available after filtering');
    }
    return otherPaths[0].path;
}
async markPathAsBad(path) {
this.onionPaths.forEach(p => {
if (p.path === path) {
// eslint-disable-next-line no-param-reassign
p.bad = true;
}
});
}
async buildNewOnionPaths() {
    // Note: this function may be called concurrently, so
    // we may want to guard against concurrent invocations
const _ = window.Lodash;
log.info('building new onion paths');
const allNodes = await this.getRandomSnodePool();
if (this.guardNodes.length === 0) {
// Not cached, load from DB
const nodes = await window.libloki.storage.getGuardNodes();
if (nodes.length === 0) {
        log.warn('no guard nodes in DB. Will be selecting new guard nodes...');
} else {
// We only store the nodes' keys, need to find full entries:
const edKeys = nodes.map(x => x.ed25519PubKey);
this.guardNodes = allNodes.filter(
x => edKeys.indexOf(x.pubkey_ed25519) !== -1
);
if (this.guardNodes.length < edKeys.length) {
log.warn(
`could not find some guard nodes: ${this.guardNodes.length}/${
edKeys.length
}`
);
}
}
      // If the guard node list is still empty (the old nodes are now invalid), select new ones:
if (this.guardNodes.length === 0) {
this.guardNodes = await this.selectGuardNodes();
}
}
// TODO: select one guard node and 2 other nodes randomly
let otherNodes = _.difference(allNodes, this.guardNodes);
if (otherNodes.length < 2) {
log.error('Too few nodes to build an onion path!');
return;
}
otherNodes = _.shuffle(otherNodes);
const guards = _.shuffle(this.guardNodes);
// Create path for every guard node:
// Each path needs 2 nodes in addition to the guard node:
const maxPath = Math.floor(Math.min(guards.length, otherNodes.length / 2));
// TODO: might want to keep some of the existing paths
this.onionPaths = [];
for (let i = 0; i < maxPath; i += 1) {
const path = [guards[i], otherNodes[i * 2], otherNodes[i * 2 + 1]];
this.onionPaths.push({ path, bad: false });
}
log.info('Built onion paths: ', this.onionPaths);
}
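To make the path sizing above concrete, a worked example with illustrative numbers: each path consumes one guard plus two distinct non-guard nodes, so the count is capped by whichever runs out first.

// Illustrative values only, not real snode entries:
const guards = ['g0', 'g1', 'g2'];
const others = ['o0', 'o1', 'o2', 'o3', 'o4', 'o5', 'o6'];
Math.floor(Math.min(guards.length, others.length / 2)); // => 3 paths
// paths built: [g0, o0, o1], [g1, o2, o3], [g2, o4, o5]; o6 is left unused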
async getRandomSnodeAddress() {
@ -42,8 +262,10 @@ class LokiSnodeAPI {
let timeoutTimer = null;
// private retry container
const trySeedNode = async (consecutiveErrors = 0) => {
// Removed limit until there is a way to get snode info
// for individual nodes (needed for guard nodes); this way
// we get all active nodes
const params = {
limit: RANDOM_SNODES_POOL_SIZE,
active_only: true,
fields: {
public_ip: true,

@ -17,6 +17,7 @@
class FallBackDecryptionError extends Error {}
const IV_LENGTH = 16;
const NONCE_LENGTH = 12;
async function DHEncrypt(symmetricKey, plainText) {
const iv = libsignal.crypto.getRandomBytes(IV_LENGTH);
@ -33,6 +34,52 @@
return ivAndCiphertext;
}
async function EncryptGCM(symmetricKey, plaintext) {
const nonce = crypto.getRandomValues(new Uint8Array(NONCE_LENGTH));
const key = await crypto.subtle.importKey(
'raw',
symmetricKey,
{ name: 'AES-GCM' },
false,
['encrypt']
);
const ciphertext = await crypto.subtle.encrypt(
{ name: 'AES-GCM', iv: nonce, tagLength: 128 },
key,
plaintext
);
const ivAndCiphertext = new Uint8Array(
NONCE_LENGTH + ciphertext.byteLength
);
ivAndCiphertext.set(nonce);
ivAndCiphertext.set(new Uint8Array(ciphertext), nonce.byteLength);
return ivAndCiphertext;
}
async function DecryptGCM(symmetricKey, ivAndCiphertext) {
const nonce = ivAndCiphertext.slice(0, NONCE_LENGTH);
const ciphertext = ivAndCiphertext.slice(NONCE_LENGTH);
const key = await crypto.subtle.importKey(
'raw',
symmetricKey,
{ name: 'AES-GCM' },
false,
['decrypt']
);
return crypto.subtle.decrypt(
{ name: 'AES-GCM', iv: nonce },
key,
ciphertext
);
}
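A round-trip sketch for the two helpers above, assuming an async context; the 32-byte key is generated on the spot purely for illustration:

const key = crypto.getRandomValues(new Uint8Array(32)); // AES-256 key
const sealed = await EncryptGCM(key, new TextEncoder().encode('ping'));
// `sealed` is nonce || ciphertext+tag, as built by EncryptGCM above
const opened = await DecryptGCM(key, sealed);
new TextDecoder().decode(opened); // => 'ping'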
async function DHDecrypt(symmetricKey, ivAndCiphertext) {
const iv = ivAndCiphertext.slice(0, IV_LENGTH);
const ciphertext = ivAndCiphertext.slice(IV_LENGTH);
@ -106,11 +153,16 @@
return Multibase.decode(`${base32zCode}${snodeAddressClean}`);
}
function generateEphemeralKeyPair() {
const keys = libsignal.Curve.generateKeyPair();
// Signal protocol prepends with "0x05"
keys.pubKey = keys.pubKey.slice(1);
return keys;
}
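The slice(1) above drops libsignal's Curve25519 type byte; the snodes expect the raw 32-byte x25519 key. A quick illustration of the lengths involved:

const kp = libsignal.Curve.generateKeyPair();
kp.pubKey.byteLength; // 33: leading 0x05 type byte + 32-byte key
kp.pubKey.slice(1).byteLength; // 32: raw key, as returned by generateEphemeralKeyPair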
class LokiSnodeChannel {
constructor() {
this._ephemeralKeyPair = libsignal.Curve.generateKeyPair();
// Signal protocol prepends with "0x05"
this._ephemeralKeyPair.pubKey = this._ephemeralKeyPair.pubKey.slice(1);
this._ephemeralKeyPair = generateEphemeralKeyPair();
this._ephemeralPubKeyHex = StringView.arrayBufferToHex(
this._ephemeralKeyPair.pubKey
);
@ -474,7 +526,9 @@
window.libloki.crypto = {
DHEncrypt,
EncryptGCM, // AES-GCM
DHDecrypt,
DecryptGCM, // AES-GCM
FallBackSessionCipher,
FallBackDecryptionError,
snodeCipher,
@ -485,6 +539,7 @@
validateAuthorisation,
PairingType,
LokiSessionCipher,
generateEphemeralKeyPair,
// for testing
_LokiSnodeChannel: LokiSnodeChannel,
_decodeSnodeAddressToPubKey: decodeSnodeAddressToPubKey,

@ -240,6 +240,14 @@
return window.Signal.Data.getSecondaryDevicesFor(primaryDevicePubKey);
}
function getGuardNodes() {
return window.Signal.Data.getGuardNodes();
}
function updateGuardNodes(nodes) {
return window.Signal.Data.updateGuardNodes(nodes);
}
async function getAllDevicePubKeysForPrimaryPubKey(primaryDevicePubKey) {
await saveAllPairingAuthorisationsFor(primaryDevicePubKey);
const secondaryPubKeys =
@ -265,6 +273,8 @@
getAllDevicePubKeysForPrimaryPubKey,
getSecondaryDevicesFor,
getPrimaryDeviceMapping,
getGuardNodes,
updateGuardNodes,
};
// Libloki protocol store

@ -26,6 +26,7 @@
: 0;
},
// This is not a "standard" base64, do not use!
base64ToBytes(sBase64, nBlocksSize) {
const sB64Enc = sBase64.replace(/[^A-Za-z0-9+/]/g, '');
const nInLen = sB64Enc.length;

@ -423,6 +423,7 @@ window.lokiFeatureFlags = {
privateGroupChats: true,
useSnodeProxy: !process.env.USE_STUBBED_NETWORK,
useSealedSender: true,
useOnionRequests: false,
};
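useOnionRequests ships disabled, so lokiFetch only takes the new onion branch when the flag is flipped and a targetNode is supplied. A local tweak to exercise it would presumably be just:

// Hypothetical local-only change; not part of this PR's defaults.
window.lokiFeatureFlags.useOnionRequests = true;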
// eslint-disable-next-line no-extend-native,func-names
@ -431,7 +432,11 @@ Promise.prototype.ignore = function() {
this.then(() => {});
};
if (config.environment.includes('test')) {
if (
  config.environment.includes('test') &&
  config.environment !== 'swarm-testing1' &&
  config.environment !== 'swarm-testing2'
) {
const isWindows = process.platform === 'win32';
/* eslint-disable global-require, import/no-extraneous-dependencies */
window.test = {
@ -451,5 +456,6 @@ if (config.environment.includes('test')) {
updateSwarmNodes: () => {},
updateLastHash: () => {},
getSwarmNodesForPubKey: () => [],
buildNewOnionPaths: () => [],
};
}
