diff --git a/.gitignore b/.gitignore
index 306c1fc2b..70fd182c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -119,3 +119,4 @@ dist
# IDE
.history
+.idea
diff --git a/README.md b/README.md
index 80a7a630c..8f051d128 100644
--- a/README.md
+++ b/README.md
@@ -121,6 +121,7 @@ Enkrypt is a web3 wallet built from the ground up to support the multi-chain fut
- Unit Zero Testnet
- Nibiru
- Nibiru Testnet
+- Firo
- More coming soon!
Looking to add your project? [Contact us!](https://mewwallet.typeform.com/enkrypt-inquiry?typeform-source=www.enkrypt.com)
diff --git a/package.json b/package.json
index 5ac1606c1..e6a31112a 100644
--- a/package.json
+++ b/package.json
@@ -48,5 +48,8 @@
"@ledgerhq/speculos-transport": "https://registry.yarnpkg.com/@favware/skip-dependency/-/skip-dependency-1.2.1.tgz",
"@ledgerhq/ledger-key-ring-protocol": "https://registry.yarnpkg.com/@favware/skip-dependency/-/skip-dependency-1.2.1.tgz",
"@amplitude/plugin-autocapture-browser@^1.0.2": "patch:@amplitude/plugin-autocapture-browser@npm%3A1.0.3#./.yarn/patches/@amplitude-plugin-autocapture-browser-npm-1.0.3-edb25bef55.patch"
+ },
+ "dependencies": {
+ "ecpair": "3.0.0-rc.0"
}
}
diff --git a/packages/extension/.prettierignore b/packages/extension/.prettierignore
new file mode 100644
index 000000000..88783bfdb
--- /dev/null
+++ b/packages/extension/.prettierignore
@@ -0,0 +1,2 @@
+**/*.js
+**/*.d.ts
\ No newline at end of file
diff --git a/packages/extension/package.json b/packages/extension/package.json
index 84b96dada..344764015 100644
--- a/packages/extension/package.json
+++ b/packages/extension/package.json
@@ -24,6 +24,7 @@
},
"dependencies": {
"@amplitude/analytics-browser": "^2.32.0",
+ "@bitcoinerlab/secp256k1": "^1.2.0",
"@enkryptcom/extension-bridge": "workspace:^",
"@enkryptcom/hw-wallets": "workspace:^",
"@enkryptcom/keyring": "workspace:^",
@@ -65,6 +66,8 @@
"bs58": "^6.0.0",
"concurrently": "^9.2.1",
"echarts": "^6.0.0",
+ "ecpair": "^3.0.0",
+ "electrum-client-browser": "1.2.5",
"ethereum-cryptography": "^2.2.1",
"ethereumjs-abi": "^0.6.8",
"eventemitter3": "^5.0.1",
diff --git a/packages/extension/src/libs/background/index.ts b/packages/extension/src/libs/background/index.ts
index a888c2290..41f837d30 100644
--- a/packages/extension/src/libs/background/index.ts
+++ b/packages/extension/src/libs/background/index.ts
@@ -1,25 +1,25 @@
+import DomainState from '@/libs/domain-state';
+import { sendToWindow } from '@/libs/messenger/extension';
+import PersistentEvents from '@/libs/persistent-events';
+import TabInfo from '@/libs/utils/tab-info';
+import Providers from '@/providers';
+import { BaseFiroWallet } from '@/providers/bitcoin/libs/firo-wallet/base-firo-wallet';
import {
InternalMethods,
InternalOnMessageResponse,
Message,
} from '@/types/messenger';
-import { RPCRequestType, OnMessageResponse } from '@enkryptcom/types';
+import { ProviderName } from '@/types/provider';
+import { OnMessageResponse, RPCRequestType } from '@enkryptcom/types';
import { v4 as randomUUID } from 'uuid';
+import Browser from 'webextension-polyfill';
import { getCustomError } from '../error';
import KeyRingBase from '../keyring/keyring';
-import { sendToWindow } from '@/libs/messenger/extension';
-import { ProviderName } from '@/types/provider';
-import Providers from '@/providers';
-import Browser from 'webextension-polyfill';
-import TabInfo from '@/libs/utils/tab-info';
-import PersistentEvents from '@/libs/persistent-events';
-import DomainState from '@/libs/domain-state';
-import { TabProviderType, ProviderType, ExternalMessageOptions } from './types';
import { getProviderNetworkByName } from '../utils/networks';
import {
- sign,
- getEthereumPubKey,
ethereumDecrypt,
+ getEthereumPubKey,
+ sign,
unlock,
changeNetwork,
sendToTab,
@@ -29,9 +29,11 @@ import {
import { handlePersistentEvents } from './external';
import SettingsState from '../settings-state';
import { isGeoRestricted } from '../utils/screening';
+import { ExternalMessageOptions, ProviderType, TabProviderType } from './types';
class BackgroundHandler {
#keyring: KeyRingBase;
+ #wallet: BaseFiroWallet;
#tabProviders: TabProviderType;
#providers: ProviderType;
#persistentEvents: PersistentEvents;
@@ -41,6 +43,7 @@ class BackgroundHandler {
constructor() {
this.#keyring = new KeyRingBase();
+ this.#wallet = new BaseFiroWallet();
this.#persistentEvents = new PersistentEvents();
this.#domainState = new DomainState();
this.#settingsState = new SettingsState();
@@ -162,7 +165,7 @@ class BackgroundHandler {
return response;
});
}
-  internalHandler(msg: Message): Promise<InternalOnMessageResponse> {
+  async internalHandler(msg: Message): Promise<InternalOnMessageResponse> {
const message = JSON.parse(msg.message) as RPCRequestType;
switch (message.method) {
case InternalMethods.sign:
@@ -172,6 +175,9 @@ class BackgroundHandler {
case InternalMethods.ethereumDecrypt:
return ethereumDecrypt(this.#keyring, message);
case InternalMethods.unlock:
+        this.#wallet.setSecret(
+          await this.#keyring.getSavedMnemonic(message?.params?.[0] as string),
+        );
return unlock(this.#keyring, message);
case InternalMethods.lock:
return lock(this.#keyring);
diff --git a/packages/extension/src/libs/keyring/keyring.ts b/packages/extension/src/libs/keyring/keyring.ts
index 4fc5f98e7..36b4ffbde 100644
--- a/packages/extension/src/libs/keyring/keyring.ts
+++ b/packages/extension/src/libs/keyring/keyring.ts
@@ -1,6 +1,5 @@
-import KeyRing from '@enkryptcom/keyring';
import { InternalStorageNamespace } from '@/types/provider';
-import BrowserStorage from '../common/browser-storage';
+import KeyRing from '@enkryptcom/keyring';
import {
EnkryptAccount,
HWWalletAdd,
@@ -11,6 +10,7 @@ import {
SignOptions,
WalletType,
} from '@enkryptcom/types';
+import BrowserStorage from '../common/browser-storage';
export class KeyRingBase {
#keyring: KeyRing;
constructor() {
@@ -70,6 +70,12 @@ export class KeyRingBase {
getKeysObject(): Promise<{ [key: string]: EnkryptAccount }> {
return this.#keyring.getKeysObject();
}
+ getPrivateKey(seed: Buffer) {
+ return this.#keyring.getPrivateKey(seed);
+ }
+ getSavedMnemonic(password: string) {
+ return this.#keyring.getSavedMnemonic(password);
+ }
   addHWAccount(account: HWWalletAdd): Promise<EnkryptAccount> {
return this.#keyring.addHWAccount(account);
}
diff --git a/packages/extension/src/libs/keyring/public-keyring.ts b/packages/extension/src/libs/keyring/public-keyring.ts
index e5cbb58b6..8938f5be8 100644
--- a/packages/extension/src/libs/keyring/public-keyring.ts
+++ b/packages/extension/src/libs/keyring/public-keyring.ts
@@ -1,8 +1,9 @@
import {
- SignerType,
+ EnkryptAccount,
Errors,
+ SignerType,
+ SignOptions,
WalletType,
- EnkryptAccount,
} from '@enkryptcom/types';
import { KeyRingBase } from './keyring';
@@ -145,5 +146,13 @@ class PublicKeyRing {
return alreadyExists;
}
+
+ async getPrivateKey(seed: Buffer) {
+ return this.#keyring.getPrivateKey(seed);
+ }
+
+ getSavedMnemonic(password: string) {
+ return this.#keyring.getSavedMnemonic(password);
+ }
}
export default PublicKeyRing;
diff --git a/packages/extension/src/libs/spark-handler/constants.ts b/packages/extension/src/libs/spark-handler/constants.ts
new file mode 100644
index 000000000..7894ed8ef
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/constants.ts
@@ -0,0 +1,3 @@
+export const SPARK_TX_TYPE = 9;
+
+export const LOCK_TIME = 999999;
diff --git a/packages/extension/src/libs/spark-handler/createTempFromSparkTx.ts b/packages/extension/src/libs/spark-handler/createTempFromSparkTx.ts
new file mode 100644
index 000000000..6613d050c
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/createTempFromSparkTx.ts
@@ -0,0 +1,39 @@
+import * as bitcoin from 'bitcoinjs-lib';
+import { LOCK_TIME, SPARK_TX_TYPE } from '@/libs/spark-handler/constants';
+import { isSparkAddress } from '@/providers/bitcoin/libs/utils';
+
+interface CreateTempFromSparkTxArgs {
+ network: bitcoin.networks.Network;
+ to: string;
+ amount: string;
+}
+
+// TODO: please check whether we need all this tempTx stuff if we are only using **txHashSig**
+
+export const createTempFromSparkTx = async ({
+ network,
+ amount,
+ to,
+}: CreateTempFromSparkTxArgs): Promise<string> => {
+ const isSparkTransaction = await isSparkAddress(to);
+
+ const tempTx = new bitcoin.Psbt({ network });
+ tempTx.setVersion(3 | (SPARK_TX_TYPE << 16)); // version 3 and tx type in high bits (3 | (SPARK_TX_TYPE << 16));
+ tempTx.setLocktime(LOCK_TIME); // new Date().getTime() / 1000
+
+ tempTx.addInput({
+ hash: '0000000000000000000000000000000000000000000000000000000000000000',
+ index: 4294967295,
+ sequence: 4294967295,
+ finalScriptSig: Buffer.from('d3', 'hex'),
+ });
+
+ const tempTxBuffer = tempTx.extractTransaction(true).toBuffer();
+ const extendedTempTxBuffer = Buffer.concat([
+ tempTxBuffer,
+ Buffer.from([0x00]),
+ ]);
+
+ const txHash = bitcoin.crypto.hash256(extendedTempTxBuffer);
+ return txHash.reverse().toString('hex');
+};
diff --git a/packages/extension/src/libs/spark-handler/createTempTx.ts b/packages/extension/src/libs/spark-handler/createTempTx.ts
new file mode 100644
index 000000000..7a5ef1c7e
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/createTempTx.ts
@@ -0,0 +1,79 @@
+import { validator } from '@/providers/bitcoin/libs/firo-wallet/firo-wallet';
+import BigNumber from 'bignumber.js';
+import * as bitcoin from 'bitcoinjs-lib';
+import { ECPairInterface } from 'ecpair';
+
+interface CreateTempTxArgs {
+ network: bitcoin.networks.Network;
+ changeAmount: BigNumber;
+ mintValueOutput: {
+ script: Buffer;
+ value: number;
+ }[];
+ inputs: {
+ hash: string;
+ index: number;
+ nonWitnessUtxo: Buffer;
+ }[];
+ spendableUtxos: {
+ keyPair: ECPairInterface;
+ address: string;
+ txid: string;
+ vout: number;
+ scriptPubKey: string;
+ amount: number;
+ satoshis: number;
+ confirmations: number;
+ }[];
+  addressKeyPairs: Record<string, ECPairInterface>;
+}
+
+export const createTempTx = ({
+ network,
+ inputs,
+ spendableUtxos,
+ addressKeyPairs,
+ mintValueOutput,
+ changeAmount,
+}: CreateTempTxArgs) => {
+ const tx = new bitcoin.Psbt({ network });
+ tx.setVersion(2);
+
+ inputs.forEach(input => {
+ tx.addInput(input);
+ });
+
+ mintValueOutput.forEach(mint => {
+ tx.addOutput({
+ script: mint.script,
+ value: mint.value,
+ });
+ });
+
+ if (changeAmount.gt(0)) {
+ const firstUtxoAddress = spendableUtxos[0].address;
+
+ tx.addOutput({
+ address: firstUtxoAddress!,
+ value: changeAmount.toNumber(),
+ });
+ }
+
+ for (let index = 0; index < spendableUtxos.length; index++) {
+ const utxo = spendableUtxos[index];
+ const keyPair = addressKeyPairs[utxo.address];
+
+ const Signer = {
+ sign: (hash: Uint8Array) => {
+ return Buffer.from(keyPair.sign(hash));
+ },
+ publicKey: Buffer.from(keyPair.publicKey),
+ } as unknown as bitcoin.Signer;
+
+ tx.signInput(index, Signer);
+ }
+ tx.validateSignaturesOfAllInputs(validator);
+ tx.finalizeAllInputs();
+
+ return tx.extractTransaction();
+};
diff --git a/packages/extension/src/libs/spark-handler/generateSparkWallet.ts b/packages/extension/src/libs/spark-handler/generateSparkWallet.ts
new file mode 100644
index 000000000..6787c568c
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/generateSparkWallet.ts
@@ -0,0 +1,172 @@
+import PublicKeyRing from '@/libs/keyring/public-keyring';
+import { SparkAccount } from '@/ui/action/types/account';
+import { PublicFiroWallet } from '@/providers/bitcoin/libs/firo-wallet/public-firo-wallet';
+import { wasmInstance } from '@/libs/utils/wasm-loader';
+
+export const getSpendKeyObj = async (wasm: WasmModule) => {
+ const wallet = new PublicFiroWallet();
+ const seed = await wallet.getSecret();
+ let keyDataPtr;
+ let spendKeyObj;
+ let spendKeyDataObj;
+ try {
+ const keyring = new PublicKeyRing();
+
+ const { pk } = await keyring.getPrivateKey(seed);
+
+ const keyData = Buffer.from(pk, 'hex');
+
+ keyDataPtr = wasm._malloc(keyData.length);
+ wasm.HEAPU8.set(keyData, keyDataPtr);
+
+ spendKeyDataObj = wasm.ccall(
+ 'js_createSpendKeyData',
+ 'number',
+ ['number', 'number'],
+ [keyDataPtr, 1],
+ );
+
+ if (spendKeyDataObj === 0) {
+ throw new Error('Failed to create SpendKeyData');
+ }
+
+ spendKeyObj = wasm.ccall(
+ 'js_createSpendKey',
+ 'number',
+ ['number'],
+ [spendKeyDataObj],
+ );
+
+ if (spendKeyObj === 0) {
+ throw new Error('Failed to create SpendKey');
+ }
+
+ return spendKeyObj;
+ } catch (e) {
+ console.log(e);
+ return 0;
+ } finally {
+ if (spendKeyDataObj && spendKeyDataObj !== 0) {
+ wasm.ccall('js_freeSpendKeyData', null, ['number'], [spendKeyDataObj]);
+ }
+ if (keyDataPtr && keyDataPtr !== 0) {
+ wasm._free(keyDataPtr);
+ }
+ }
+};
+
+export const getIncomingViewKey = async (
+ wasm: WasmModule,
+ spendKeyObj: number,
+): Promise<{ fullViewKeyObj: number; incomingViewKeyObj: number }> => {
+ let fullViewKeyObj;
+ let incomingViewKeyObj;
+ try {
+ fullViewKeyObj = wasm.ccall(
+ 'js_createFullViewKey',
+ 'number',
+ ['number'],
+ [spendKeyObj],
+ );
+
+ if (fullViewKeyObj === 0) {
+ throw new Error('Failed to create FullViewKey');
+ }
+
+ incomingViewKeyObj = wasm.ccall(
+ 'js_createIncomingViewKey',
+ 'number',
+ ['number'],
+ [fullViewKeyObj],
+ );
+
+ if (incomingViewKeyObj === 0) {
+ throw new Error('Failed to create IncomingViewKey');
+ }
+
+ return { incomingViewKeyObj, fullViewKeyObj };
+ } catch {
+ if (fullViewKeyObj && fullViewKeyObj !== 0) {
+ wasm.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+ }
+ if (incomingViewKeyObj && incomingViewKeyObj !== 0) {
+ wasm.ccall(
+ 'js_freeIncomingViewKey',
+ null,
+ ['number'],
+ [incomingViewKeyObj],
+ );
+ }
+ return { incomingViewKeyObj: 0, fullViewKeyObj: 0 };
+ }
+};
+
+export async function getSparkState(
+ diversifier = 1,
+ isTestNetwork = false,
+): Promise<Record<string, string> | undefined> {
+ const wasm = await wasmInstance.getInstance();
+
+ if (!wasm) {
+ console.error('Wasm not loaded');
+ return;
+ }
+
+ const is_test_network = isTestNetwork ? 1 : 0;
+
+ const spendKeyObj = await getSpendKeyObj(wasm);
+
+ const { incomingViewKeyObj, fullViewKeyObj } = await getIncomingViewKey(
+ wasm,
+ spendKeyObj,
+ );
+
+ if (fullViewKeyObj === 0) {
+ console.error('Failed to create FullViewKey');
+ wasm.ccall('js_freeSpendKey', null, ['number'], [spendKeyObj]);
+ return;
+ }
+
+ if (incomingViewKeyObj === 0) {
+ console.error('Failed to create IncomingViewKey');
+ wasm.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+ wasm.ccall('js_freeSpendKey', null, ['number'], [spendKeyObj]);
+ return;
+ }
+
+ const addressObj = wasm.ccall(
+ 'js_getAddress',
+ 'number',
+ ['number', 'number'],
+ [incomingViewKeyObj, BigInt(diversifier)],
+ );
+
+ if (addressObj === 0) {
+ console.error('Failed to get Address');
+ wasm.ccall(
+ 'js_freeIncomingViewKey',
+ null,
+ ['number'],
+ [incomingViewKeyObj],
+ );
+ wasm.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+ wasm.ccall('js_freeSpendKey', null, ['number'], [spendKeyObj]);
+ return;
+ }
+
+ const address_enc_main = wasm.ccall(
+ 'js_encodeAddress',
+ 'string',
+ ['number', 'number'],
+ [addressObj, is_test_network],
+ );
+
+ wasm.ccall('js_freeSpendKey', null, ['number'], [spendKeyObj]);
+ wasm.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+ wasm.ccall('js_freeIncomingViewKey', null, ['number'], [incomingViewKeyObj]);
+  wasm.ccall('js_freeAddress', null, ['number'], [addressObj]);
+
+ return {
+ defaultAddress: address_enc_main,
+ };
+}
diff --git a/packages/extension/src/libs/spark-handler/getFee.ts b/packages/extension/src/libs/spark-handler/getFee.ts
new file mode 100644
index 000000000..95375572a
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/getFee.ts
@@ -0,0 +1,12 @@
+import { SATOSHI } from '@/providers/bitcoin/libs/firo-wallet/firo-wallet';
+import BigNumber from 'bignumber.js';
+import * as bitcoin from 'bitcoinjs-lib';
+import { firoElectrum } from '@/providers/bitcoin/libs/electrum-client/electrum-client.ts';
+
+export const getFee = async (tempTx: bitcoin.Transaction) => {
+ const feeRate = (await firoElectrum.estimateFee(2)) as number;
+
+ return new BigNumber(feeRate)
+ .times(Math.ceil(tempTx.virtualSize() / 1000))
+ .times(SATOSHI);
+};
diff --git a/packages/extension/src/libs/spark-handler/getMintTxData.ts b/packages/extension/src/libs/spark-handler/getMintTxData.ts
new file mode 100644
index 000000000..078ad5716
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/getMintTxData.ts
@@ -0,0 +1,108 @@
+import { serializeMintContext } from './serializeMintContext';
+
+interface IArgs {
+ wasmModule: WasmModule | undefined;
+ address: string;
+ amount: string;
+ utxos: {
+ txHash: Buffer;
+ vout: number;
+ txHashLength: number;
+ }[];
+ memo?: string;
+ isTestNetwork?: boolean;
+}
+
+export const getMintTxData = async ({
+ wasmModule,
+ address,
+ amount,
+ utxos,
+ isTestNetwork = false,
+ memo = '',
+}: IArgs) => {
+ if (!wasmModule) {
+ console.error('Wasm not loaded');
+ return;
+ }
+
+ const decodedAddressPtr = wasmModule.ccall(
+ 'js_decodeAddress',
+ 'number',
+ ['string', 'number'],
+ [address, Number(isTestNetwork)],
+ );
+
+ const outputsArray = [];
+
+ const mintedCoinData = wasmModule.ccall(
+ 'js_createMintedCoinData',
+ 'number',
+ ['number', 'number', 'string'],
+ [decodedAddressPtr, BigInt(amount), memo],
+ );
+
+ if (!mintedCoinData) {
+ throw new Error(`Failed to create MintedCoinData`);
+ }
+ outputsArray.push(mintedCoinData);
+
+ const pointerSize = 4;
+ const outputsPointerArray = wasmModule._malloc(pointerSize);
+
+ outputsArray.forEach((outputPointer, index) => {
+ wasmModule.HEAP32[(outputsPointerArray >> 2) + index] = outputPointer;
+ });
+
+ const { context: serialContext } = serializeMintContext(utxos);
+
+ const serialContextPointer = wasmModule._malloc(serialContext.length);
+ wasmModule.HEAPU8.set(serialContext, serialContextPointer);
+
+ const recipientsVectorPtr = wasmModule._js_createSparkMintRecipients(
+ outputsPointerArray,
+ 1,
+ serialContextPointer,
+ serialContext.length,
+ 1,
+ );
+
+ if (!recipientsVectorPtr) {
+ throw new Error('Failed to call `js_createSparkMintRecipients`.');
+ }
+
+ const recipientsLength =
+ wasmModule._js_getRecipientVectorLength(recipientsVectorPtr);
+
+ const recipientsOutput = [];
+
+ for (let i = 0; i < recipientsLength; i++) {
+ const recipientPtr = wasmModule._js_getRecipientAt(recipientsVectorPtr, i);
+
+ const scriptPubKeySize =
+ wasmModule._js_getRecipientScriptPubKeySize(recipientPtr);
+ const scriptPubKeyPointer =
+ wasmModule._js_getRecipientScriptPubKey(recipientPtr);
+ const scriptPubKey = new Uint8Array(scriptPubKeySize);
+ for (let j = 0; j < scriptPubKeySize; j++) {
+ scriptPubKey[j] = wasmModule.HEAPU8[scriptPubKeyPointer + j];
+ }
+
+ const amount = wasmModule._js_getRecipientAmount(recipientPtr);
+ const subtractFeeFlag =
+ wasmModule._js_getRecipientSubtractFeeFromAmountFlag(recipientPtr);
+
+ recipientsOutput.push({
+ scriptPubKey: Array.from(scriptPubKey)
+ .map(b => b.toString(16).padStart(2, '0'))
+ .join(''),
+ amount,
+ subtractFeeFlag,
+ });
+ }
+
+ wasmModule._free(outputsPointerArray);
+ wasmModule._free(serialContextPointer);
+
+ return recipientsOutput;
+};
diff --git a/packages/extension/src/libs/spark-handler/getSerializedCoin.ts b/packages/extension/src/libs/spark-handler/getSerializedCoin.ts
new file mode 100644
index 000000000..2c4211cdd
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/getSerializedCoin.ts
@@ -0,0 +1,16 @@
+function toHexArray(u8: string) {
+ return Array.from(
+ Buffer.from(u8, 'base64'),
+ byte => `0x${byte.toString(16).padStart(2, '0')}`,
+ );
+}
+
+export const base64ToHex = (base64: string): string => {
+ const raw = Buffer.from(base64, 'base64');
+
+ return raw.toString('hex');
+};
+
+export const getSerializedCoin = (coin: string) => {
+ return toHexArray(coin);
+};
diff --git a/packages/extension/src/libs/spark-handler/getSparkCoinInfo.ts b/packages/extension/src/libs/spark-handler/getSparkCoinInfo.ts
new file mode 100644
index 000000000..6ad7aeb90
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/getSparkCoinInfo.ts
@@ -0,0 +1,294 @@
+import { getSerializedCoin } from './getSerializedCoin';
+
+interface IArgs {
+ coin: string[];
+ incomingViewKeyObj: number;
+ fullViewKeyObj: number;
+ wasmModule: WasmModule;
+ keepMemory?: boolean;
+}
+
+export interface SparkCoinValue {
+ value: bigint;
+ isUsed: boolean;
+ originalCoin: string[];
+ metaData: number;
+ tag: string;
+ deserializedCoinObj: number;
+}
+
+export const getSparkCoinInfo = async ({
+ coin,
+ fullViewKeyObj,
+ incomingViewKeyObj,
+ wasmModule,
+ keepMemory = false,
+}: IArgs): Promise<SparkCoinValue> => {
+ let deserializedCoinObj;
+ let inputDataObj;
+ let inputDataWithMetaObj;
+ let identifiedCoinObj;
+ let metadataObj;
+ try {
+ const serializedCoin = getSerializedCoin(
+ coin[0],
+  ) as unknown as ArrayLike<number>;
+ const serializedCoinPointer = wasmModule._malloc(serializedCoin.length);
+ wasmModule.HEAPU8.set(serializedCoin, serializedCoinPointer);
+
+ const serialContext = getSerializedCoin(
+ coin[2],
+  ) as unknown as ArrayLike<number>;
+    const serialContextPointer = wasmModule._malloc(serialContext.length);
+ wasmModule.HEAPU8.set(serialContext, serialContextPointer);
+
+ deserializedCoinObj = wasmModule.ccall(
+ 'js_deserializeCoin',
+ 'number',
+ ['number', 'number', 'number', 'number'],
+ [
+ serializedCoinPointer,
+ serializedCoin.length,
+ serialContextPointer,
+ serialContext.length,
+ ],
+ );
+ if (!deserializedCoinObj) {
+ throw new Error('Failed to deserialize coin.');
+ }
+
+ metadataObj = wasmModule.ccall(
+ 'js_getMetadata',
+ 'number',
+ ['number', 'number'],
+ [deserializedCoinObj, incomingViewKeyObj],
+ );
+
+ if (!metadataObj) {
+ throw new Error('FAILED_TO_GET_METADATA');
+ }
+
+ const coinFromMetaObj = wasmModule.ccall(
+ 'js_getCoinFromMeta',
+ 'number',
+ ['number', 'number'],
+ [metadataObj, incomingViewKeyObj],
+ );
+ if (!coinFromMetaObj) {
+ throw new Error('Failed to get coin from metadata.');
+ }
+
+ inputDataObj = wasmModule.ccall(
+ 'js_getInputData',
+ 'number',
+ ['number', 'number', 'number'],
+ [deserializedCoinObj, fullViewKeyObj, incomingViewKeyObj],
+ );
+ if (!inputDataObj) {
+ throw new Error('Failed to get input data.');
+ }
+
+ const inputTagHex = wasmModule.ccall(
+ 'js_getInputCoinDataTag_base64',
+ 'string',
+ ['number'],
+ [inputDataObj],
+ );
+
+ inputDataWithMetaObj = wasmModule.ccall(
+ 'js_getInputDataWithMeta',
+ 'number',
+ ['number', 'number', 'number'],
+ [deserializedCoinObj, metadataObj, fullViewKeyObj],
+ );
+ if (!inputDataWithMetaObj) {
+ throw new Error('Failed to get input data with metadata.');
+ }
+
+ identifiedCoinObj = wasmModule.ccall(
+ 'js_identifyCoin',
+ 'number',
+ ['number', 'number'],
+ [deserializedCoinObj, incomingViewKeyObj],
+ );
+ if (!identifiedCoinObj) {
+ throw new Error('Failed to identify coin.');
+ }
+
+ // Example usage of `js_getIdentifiedCoinDiversifier`
+ // const diversifier = wasmModule.ccall(
+ // 'js_getIdentifiedCoinDiversifier',
+ // 'number',
+ // ['number'],
+ // [identifiedCoinObj],
+ // );
+
+ const value = wasmModule.ccall(
+ 'js_getIdentifiedCoinValue',
+ 'number',
+ ['number'],
+ [identifiedCoinObj],
+ );
+
+ // Example usage of `js_getIdentifiedCoinMemo`
+ // const memo = wasmModule.UTF8ToString(
+ // wasmModule.ccall(
+ // 'js_getIdentifiedCoinMemo',
+ // 'number',
+ // ['number'],
+ // [identifiedCoinObj],
+ // ),
+ // );
+ // console.log('Identified Coin Memo:', memo);
+
+ // Example usage of `js_getCSparkMintMetaHeight`
+ // const metaHeight = wasmModule.ccall(
+ // 'js_getCSparkMintMetaHeight',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta Height:', metaHeight);
+
+ // // Example usage of `js_getCSparkMintMetaId`
+ // const metaId = wasmModule.ccall(
+ // 'js_getCSparkMintMetaId',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta ID:', metaId);
+
+ const metaIsUsed = wasmModule.ccall(
+ 'js_getCSparkMintMetaIsUsed',
+ 'number',
+ ['number'],
+ [metadataObj],
+ );
+
+ // Example usage of `js_getCSparkMintMetaMemo`
+ // const metaMemo = wasmModule.UTF8ToString(
+ // wasmModule.ccall(
+ // 'js_getCSparkMintMetaMemo',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // ),
+ // );
+ // console.log('Spark Mint Meta Memo:', metaMemo);
+
+ // Example usage of `js_getCSparkMintMetaDiversifier`
+ // const metaDiversifier = wasmModule.ccall(
+ // 'js_getCSparkMintMetaDiversifier',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta Diversifier:', metaDiversifier);
+
+ // Example usage of `js_getCSparkMintMetaValue`
+ // const metaValue = wasmModule.ccall(
+ // 'js_getCSparkMintMetaValue',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta Value:', metaValue);
+
+ // Example usage of `js_getCSparkMintMetaType`
+ // const metaType = wasmModule.ccall(
+ // 'js_getCSparkMintMetaType',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta Type:', metaType);
+ //
+ // // Example usage of `js_getCSparkMintMetaCoin`
+ // const metaCoinObj = wasmModule.ccall(
+ // 'js_getCSparkMintMetaCoin',
+ // 'number',
+ // ['number'],
+ // [metadataObj],
+ // );
+ // console.log('Spark Mint Meta Coin:', metaCoinObj);
+ //
+ // // Example usage of `js_getInputCoinDataCoverSetId`
+ // const inputCoverSetId = wasmModule.ccall(
+ // 'js_getInputCoinDataCoverSetId',
+ // 'number',
+ // ['number'],
+ // [inputDataObj],
+ // );
+ // console.log('Input Coin Data Cover Set ID:', inputCoverSetId);
+ //
+ // // Example usage of `js_getInputCoinDataIndex`
+ // const inputIndex = wasmModule.ccall(
+ // 'js_getInputCoinDataIndex',
+ // 'number',
+ // ['number'],
+ // [inputDataObj],
+ // );
+ // console.log('Input Coin Data Index:', inputIndex);
+ //
+ // // Example usage of `js_getInputCoinDataValue`
+ // const inputValue = wasmModule.ccall(
+ // 'js_getInputCoinDataValue',
+ // 'number',
+ // ['number'],
+ // [inputDataObj],
+ // );
+ // console.log('Input Coin Data Value:', inputValue);
+
+ // const savedOwnCoins = await db.readData('ownCoins');
+ // await db.saveData('', [
+ // ...savedOwnCoins,
+ // {
+ // diversifier,
+ // value,
+ // memo,
+ // metaHeight,
+ // metaId,
+ // },
+ // ]);
+
+ return {
+ value,
+ isUsed: !!metaIsUsed,
+ originalCoin: coin,
+ metaData: metadataObj,
+ tag: inputTagHex,
+ deserializedCoinObj,
+ };
+ } catch (e) {
+ throw e;
+ } finally {
+ if (!keepMemory) {
+ wasmModule.ccall('js_freeCoin', null, ['number'], [deserializedCoinObj]);
+ wasmModule.ccall(
+ 'js_freeInputCoinData',
+ null,
+ ['number'],
+ [inputDataObj],
+ );
+ wasmModule.ccall(
+ 'js_freeInputCoinData',
+ null,
+ ['number'],
+ [inputDataWithMetaObj],
+ );
+ wasmModule.ccall(
+ 'js_freeIdentifiedCoinData',
+ null,
+ ['number'],
+ [identifiedCoinObj],
+ );
+ wasmModule.ccall(
+ 'js_freeCSparkMintMeta',
+ null,
+ ['number'],
+ [metadataObj],
+ );
+ }
+ }
+};
diff --git a/packages/extension/src/libs/spark-handler/getTotalMintedAmount.ts b/packages/extension/src/libs/spark-handler/getTotalMintedAmount.ts
new file mode 100644
index 000000000..e9f93ca2a
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/getTotalMintedAmount.ts
@@ -0,0 +1,27 @@
+import BigNumber from 'bignumber.js';
+import { firoElectrum } from '@/providers/bitcoin/libs/electrum-client/electrum-client';
+import { UnspentTxOutputModel } from '@/providers/bitcoin/libs/electrum-client/abstract-electrum';
+import { ECPairInterface } from 'ecpair';
+
+type SpendableUtxo = Omit<UnspentTxOutputModel, 'keyPair'> & {
+ keyPair: ECPairInterface;
+};
+
+export const getTotalMintedAmount = async (spendableUtxos: SpendableUtxo[]) => {
+ let inputAmount = 0;
+ const psbtInputs = [];
+
+ for (const utxo of spendableUtxos) {
+ const txRaw = await firoElectrum.getTxRaw(utxo.txid);
+
+ psbtInputs.push({
+ hash: utxo.txid,
+ index: utxo.vout,
+ nonWitnessUtxo: Buffer.from(txRaw, 'hex'),
+ });
+
+ inputAmount += utxo.satoshis;
+ }
+
+ return { inputAmountBn: new BigNumber(inputAmount), psbtInputs };
+};
diff --git a/packages/extension/src/libs/spark-handler/index.ts b/packages/extension/src/libs/spark-handler/index.ts
new file mode 100644
index 000000000..5588833ae
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/index.ts
@@ -0,0 +1,585 @@
+import { wasmInstance } from '@/libs/utils/wasm-loader';
+import {
+ getIncomingViewKey,
+ getSpendKeyObj,
+} from '@/libs/spark-handler/generateSparkWallet';
+import { getSparkCoinInfo } from '@/libs/spark-handler/getSparkCoinInfo';
+import { DB_DATA_KEYS, IndexedDBHelper } from '@action/db/indexedDB';
+import { OwnedCoinData } from '@action/workers/sparkCoinInfoWorker';
+import * as bitcoin from 'bitcoinjs-lib';
+import { isSparkAddress } from '@/providers/bitcoin/libs/utils';
+import {
+ base64ToHex,
+ getSerializedCoin,
+} from '@/libs/spark-handler/getSerializedCoin';
+import {
+ base64ToReversedHex,
+ numberToReversedHex,
+} from '@/libs/spark-handler/utils';
+import { LOCK_TIME, SPARK_TX_TYPE } from '@/libs/spark-handler/constants';
+import { intersectSets } from '@action/utils/set-utils';
+import BigNumber from 'bignumber.js';
+
+export async function sendFromSparkAddress(
+ network: bitcoin.Network,
+ to: string,
+ amount: string,
+): Promise<string> {
+ let spendKeyObj = 0;
+ let fullViewKeyObj = 0;
+ let incomingViewKeyObj = 0;
+ let addressObj = 0;
+ let recipientsVector = 0;
+ let privateRecipientsVector = 0;
+ let coinsList = 0;
+ let coverSetDataMap = 0;
+ let idAndBlockHashesMap = 0;
+ let result = 0;
+ let deserializedCoinObj = 0;
+ let coverSetData = 0;
+ const diversifier = 1n;
+ const db = new IndexedDBHelper();
+ const Module = await wasmInstance.getInstance();
+
+ spendKeyObj = await getSpendKeyObj(Module);
+ const isSparkTransaction = await isSparkAddress(to);
+
+ const ownedCoins = ((await db.readData('myCoins')) ||
+ []) as OwnedCoinData[];
+
+ const usedCoinsTags = await db.readData<{ tags: string[] }>(
+ DB_DATA_KEYS.usedCoinsTags,
+ );
+ const coinsTagsSet = new Set(usedCoinsTags.tags);
+ const myCoinsTagsSet = new Set((ownedCoins ?? []).map(coin => coin.tag));
+
+ const usedMyCoinsTagsSet = intersectSets(coinsTagsSet, myCoinsTagsSet);
+ const revalidatedCoins = ownedCoins.filter(ownedCoin => {
+ return !usedMyCoinsTagsSet.has(ownedCoin.tag);
+ });
+
+ const spendCoinList: {
+ coin: string[];
+ setId: number;
+ metadata: number;
+ deserializedCoinObj: number;
+ }[] = [];
+
+ const keyObjects = await getIncomingViewKey(
+ Module,
+ spendKeyObj,
+ );
+
+ incomingViewKeyObj = keyObjects.incomingViewKeyObj
+ fullViewKeyObj = keyObjects.fullViewKeyObj
+
+ addressObj = Module.ccall(
+ 'js_getAddress',
+ 'number',
+ ['number', 'number'],
+ [incomingViewKeyObj, diversifier],
+ );
+
+ // Create recipients vector for spend transaction
+ recipientsVector = Module.ccall(
+ 'js_createRecipientsVectorForCreateSparkSpendTransaction',
+ 'number',
+ ['number'],
+ [1], // intended final size
+ );
+
+ if (!isSparkTransaction) {
+ Module.ccall(
+ 'js_addRecipientForCreateSparkSpendTransaction',
+ null,
+ ['number', 'number', 'number'],
+ [recipientsVector, BigInt(+amount * 10 ** 8), 0],
+ );
+ }
+
+ privateRecipientsVector = Module.ccall(
+ 'js_createPrivateRecipientsVectorForCreateSparkSpendTransaction',
+ 'number',
+ ['number'],
+ [1], // intended final size
+ );
+
+ Module.ccall(
+ 'js_addPrivateRecipientForCreateSparkSpendTransaction',
+ null,
+ ['number', 'number', 'number', 'string', 'number'],
+ [
+ privateRecipientsVector,
+ addressObj,
+ BigInt(+amount * 10 ** 8),
+ 'Private memo',
+ 1,
+ ],
+ );
+
+ coinsList = Module.ccall(
+ 'js_createCoinsListForCreateSparkSpendTransaction',
+ 'number',
+ [],
+ [],
+ );
+
+  const coinMetaPromiseList: Promise<void>[] = [];
+
+ revalidatedCoins.forEach(ownedCoin => {
+ const myCoinMetaData = getSparkCoinInfo({
+ coin: ownedCoin.coin,
+ fullViewKeyObj,
+ incomingViewKeyObj,
+ wasmModule: Module,
+ keepMemory: true,
+ })
+ .then(data => {
+ if (!data.isUsed) {
+ spendCoinList.push({
+ coin: ownedCoin.coin,
+ setId: ownedCoin.setId,
+ metadata: data.metaData,
+ deserializedCoinObj: data.deserializedCoinObj,
+ });
+ } else {
+ }
+ })
+ .catch(err => {
+ console.error('Error getting spark coin info', err);
+ });
+ coinMetaPromiseList.push(myCoinMetaData);
+ });
+
+ await Promise.allSettled(coinMetaPromiseList);
+
+ spendCoinList.forEach(spendCoin => {
+ Module.ccall(
+ 'js_setCSparkMintMetaId', // C++ function name
+ null, // Return type
+ ['number', 'number'], // Argument types
+ [spendCoin.metadata, spendCoin.setId],
+ );
+ });
+
+ try {
+ spendCoinList.forEach(spendCoin => {
+ Module.ccall(
+ 'js_addCoinToListForCreateSparkSpendTransaction',
+ null,
+ ['number', 'number'],
+ [coinsList, spendCoin.metadata],
+ );
+ });
+ } catch (error) {
+ console.error('Error adding coins to list:', error);
+ }
+ coverSetDataMap = Module.ccall(
+ 'js_createCoverSetDataMapForCreateSparkSpendTransaction',
+ 'number',
+ [],
+ [],
+ );
+
+ const groupedBySet = spendCoinList.reduce(
+ (acc, coin) => {
+ if (!acc[coin.setId]) {
+ acc[coin.setId] = [];
+ }
+ acc[coin.setId].push(coin);
+ return acc;
+ },
+    {} as Record<number, typeof spendCoinList>,
+ );
+
+ console.debug('groupedBySet', groupedBySet);
+
+  const deserializedCoinList: Record<string, number[]> = {};
+ // TODO: move to separate function
+
+ for (const set in groupedBySet) {
+ const fullSet = await db.getSetById(Number(set));
+ fullSet?.coins.forEach(coin => {
+ const serializedCoin = getSerializedCoin(
+ coin[0] as string,
+      ) as unknown as ArrayLike<number>;
+
+ const serializedCoinPointer = Module._malloc(serializedCoin.length);
+ Module.HEAPU8.set(serializedCoin, serializedCoinPointer);
+
+ const serialContext = getSerializedCoin(
+ coin[2] as string,
+      ) as unknown as ArrayLike<number>;
+
+ const serialContextPointer = Module._malloc(serialContext.length);
+ Module.HEAPU8.set(serialContext, serialContextPointer);
+
+ deserializedCoinObj = Module.ccall(
+ 'js_deserializeCoin',
+ 'number',
+ ['number', 'number', 'number', 'number'],
+ [
+ serializedCoinPointer,
+ serializedCoin.length,
+ serialContextPointer,
+ serialContext.length,
+ ],
+ );
+
+ if (!deserializedCoinList[set]) {
+ deserializedCoinList[set] = [];
+ }
+ deserializedCoinList[set].push(deserializedCoinObj);
+ });
+ }
+
+ const setHashList = await db.getSetHashes();
+
+ for (const setId in groupedBySet) {
+ const coverSetRepresentation = Buffer.from(
+ setHashList[+setId - 1],
+ 'base64',
+ );
+ console.debug('coverSetRepresentation :=>', coverSetRepresentation);
+ const coverSetRepresentationPointer = Module._malloc(
+ coverSetRepresentation.length,
+ );
+ Module.HEAPU8.set(coverSetRepresentation, coverSetRepresentationPointer);
+
+ coverSetData = Module.ccall(
+ 'js_createCoverSetData',
+ 'number',
+ ['number', 'number'],
+ [coverSetRepresentationPointer, coverSetRepresentation.length],
+ );
+
+ console.debug(groupedBySet, setId);
+ deserializedCoinList[setId].forEach(deserializedCoin => {
+ Module.ccall(
+ 'js_addCoinToCoverSetData',
+ null,
+ ['number', 'number'],
+ [coverSetData, deserializedCoin],
+ );
+ // console.count('Deserialized coin added');
+ });
+
+ // Add cover set data to map (with group ID 1)
+ Module.ccall(
+ 'js_addCoverSetDataForCreateSparkSpendTransaction',
+ null,
+ ['number', 'number', 'number'],
+ [coverSetDataMap, BigInt(setId), coverSetData],
+ );
+ }
+
+ idAndBlockHashesMap = Module.ccall(
+ 'js_createIdAndBlockHashesMapForCreateSparkSpendTransaction',
+ 'number',
+ [],
+ [],
+ );
+
+ const blockHashList = await db.getBlockHashes();
+ console.log('blockHashList =>>>', blockHashList);
+ for (const setId in groupedBySet) {
+ console.log(BigInt(setId), base64ToHex(blockHashList[+setId - 1]));
+ Module.ccall(
+ 'js_addIdAndBlockHashForCreateSparkSpendTransaction',
+ null,
+ ['number', 'number', 'string'],
+ [
+ idAndBlockHashesMap,
+ BigInt(setId),
+ base64ToReversedHex(blockHashList[+setId - 1]),
+ ],
+ );
+ }
+
+ const tempTx = new bitcoin.Psbt({ network: network.networkInfo });
+ tempTx.setVersion(3 | (SPARK_TX_TYPE << 16)); // version 3 and tx type in high bits (3 | (SPARK_TX_TYPE << 16));
+ tempTx.setLocktime(LOCK_TIME); // new Date().getTime() / 1000
+
+ tempTx.addInput({
+ hash: '0000000000000000000000000000000000000000000000000000000000000000',
+ index: 4294967295,
+ sequence: 4294967295,
+ finalScriptSig: Buffer.from('d3', 'hex'),
+ });
+
+ const tempTxBuffer = tempTx.extractTransaction(true).toBuffer();
+ const extendedTempTxBuffer = Buffer.concat([
+ tempTxBuffer,
+ Buffer.from([0x00]),
+ ]);
+
+ const txHash = bitcoin.crypto.hash256(extendedTempTxBuffer);
+ const txHashSig = txHash.reverse().toString('hex');
+
+ // TODO: check not spark case
+ if (!isSparkTransaction) {
+ tempTx.addOutput({
+ address: to,
+ value: new BigNumber(amount).multipliedBy(10 ** 8).toNumber(),
+ });
+ }
+
+ //tempTx// tx.signInput(0, spendKeyObj); // ? how to sign? Is I need to sign wit all utxo keypairs? // ? how to add subtractFeeFromAmount? // ? what is spend transaction type? // https://github.com/firoorg/sparkmobile/blob/main/include/spark.h#L22
+ // tx.finalizeAllInputs();
+ // const txHash = tx.extractTransaction()
+
+ // const txHashSig = txHash.getHash()
+
+ console.log('coinlist', coinsList);
+ const additionalTxSize = 0;
+
+ // Create the spend transaction
+ result = Module.ccall(
+ 'js_createSparkSpendTransaction',
+ 'number',
+ [
+ 'number',
+ 'number',
+ 'number',
+ 'number',
+ 'number',
+ 'number',
+ 'number',
+ 'number',
+ 'string',
+ 'number',
+ ],
+ [
+ spendKeyObj,
+ fullViewKeyObj,
+ incomingViewKeyObj,
+ recipientsVector,
+ privateRecipientsVector,
+ coinsList,
+ coverSetDataMap,
+ idAndBlockHashesMap,
+ txHashSig,
+ additionalTxSize,
+ ],
+ );
+
+ console.log('final result is', result);
+
+ const serializedSpend = Module.ccall(
+ 'js_getCreateSparkSpendTxResultSerializedSpend',
+ 'number', // returns a pointer to the beginning of a byte array
+ ['number'],
+ [result],
+ );
+
+ console.log('serializedSpend ==> ==>', serializedSpend);
+
+ const serializedSpendSize = Module.ccall(
+ 'js_getCreateSparkSpendTxResultSerializedSpendSize',
+ 'number',
+ ['number'],
+ [result],
+ );
+
+ console.log(`Serialized spend size: `, serializedSpendSize);
+
+ const serializedSpendBytes = new Uint8Array(
+ Module.HEAPU8.buffer,
+ serializedSpend,
+ serializedSpendSize,
+ );
+
+ // Make a copy (optional, because the above is just a view into WASM memory)
+ const spendDataCopy = new Uint8Array(serializedSpendBytes);
+
+ // If you need hex
+ const hex = Array.from(spendDataCopy)
+ .map(b => b.toString(16).padStart(2, '0'))
+ .join('');
+
+ console.log('Serialized Spend (hex):', hex);
+
+ const outputScriptsSize = Module.ccall(
+ 'js_getCreateSparkSpendTxResultOutputScriptsSize',
+ 'number',
+ ['number'],
+ [result],
+ );
+ const scripts = [];
+
+ // Get each output script
+ for (let i = 0; i < outputScriptsSize; i++) {
+ const scriptPtr = Module.ccall(
+ 'js_getCreateSparkSpendTxResultOutputScriptAt',
+ 'number', // returns a pointer to the beginning of a byte array
+ ['number', 'number'],
+ [result, i],
+ );
+
+ console.log(`Output script in for:`, scriptPtr);
+ const scriptSize = Module.ccall(
+ 'js_getCreateSparkSpendTxResultOutputScriptSizeAt',
+ 'number',
+ ['number', 'number'],
+ [result, i],
+ );
+ console.log(`Output script ${i} size: ${scriptSize}`);
+
+ const script = new Uint8Array(Module.HEAPU8.buffer, scriptPtr, scriptSize);
+
+ scripts.push(script);
+ }
+
+ // Get spent coins information
+ const spentCoinsSize = Module.ccall(
+ 'js_getCreateSparkSpendTxResultSpentCoinsSize',
+ 'number',
+ ['number'],
+ [result],
+ );
+
+ console.log(`Spent coins size: ${spentCoinsSize}`);
+
+ for (let i = 0; i < spentCoinsSize; i++) {
+ const spentCoinMeta = Module.ccall(
+ 'js_getCreateSparkSpendTxResultSpentCoinAt',
+ 'number',
+ ['number', 'number'],
+ [result, i],
+ );
+ const spentCoinValue = Module.ccall(
+ 'js_getCSparkMintMetaValue',
+ 'number',
+ ['number'],
+ [spentCoinMeta],
+ );
+
+ // const coinPtr = Module.ccall(
+ // '_js_getCoinFromMeta',
+ // 'number',
+ // ['number', 'number'],
+ // [spentCoinMeta, incomingViewKeyObj],
+ // );
+ //
+ // const hash = Module.ccall(
+ // '_js_getCoinHash',
+ // 'number',
+ // ['number'],
+ // [coinPtr],
+ // );
+ //
+ // console.log(
+ // `coin hash =>>>>>>======>>>>><<<<<======<<<<<<<======: ${hash}`,
+ // );
+ const spentCoinMetaDeserialized = new Uint8Array(
+ Module.HEAPU8.buffer,
+ spentCoinMeta,
+ spentCoinMeta.length,
+ );
+
+ console.log(
+ `spend coins meta @nd value ${spentCoinValue}, ${spentCoinMeta} ${spentCoinMetaDeserialized.toString()}`,
+ );
+ }
+
+ // Get transaction fee
+ // const fee = Module.ccall(
+ // 'js_getCreateSparkSpendTxResultFee',
+ // 'number',
+ // ['number'],
+ // [result],
+ // );
+
+ const psbt = new bitcoin.Psbt({ network: network.networkInfo });
+
+ // const api = (await network.api()) as unknown as FiroAPI;
+
+ psbt.addInput({
+ hash: '0000000000000000000000000000000000000000000000000000000000000000',
+ index: 4294967295,
+ sequence: 4294967295,
+ finalScriptSig: Buffer.from('d3', 'hex'),
+ });
+
+ psbt.setLocktime(LOCK_TIME);
+
+ psbt.setVersion(3 | (SPARK_TX_TYPE << 16));
+ scripts.forEach(script => {
+ console.log('script is ==>', script);
+ psbt.addOutput({
+ script: Buffer.from(script),
+ value: 0,
+ });
+ });
+
+ const rawTx = psbt.extractTransaction();
+ const txHex = rawTx.toHex();
+ const sizeHex = numberToReversedHex(serializedSpendSize);
+ const finalTx = txHex + 'fd' + sizeHex + hex;
+
+ console.log('Final TX to broadcast:', finalTx);
+
+ if (spendKeyObj && spendKeyObj !== 0) {
+ Module.ccall('js_freeSpendKey', null, ['number'], [spendKeyObj]);
+ }
+ if (fullViewKeyObj && fullViewKeyObj !== 0) {
+ Module.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+ }
+ if (incomingViewKeyObj && incomingViewKeyObj !== 0) {
+ Module.ccall(
+ 'js_freeIncomingViewKey',
+ null,
+ ['number'],
+ [incomingViewKeyObj],
+ );
+ }
+ if (addressObj && addressObj !== 0) {
+ Module.ccall('js_freeAddress', null, ['number'], [addressObj]);
+ }
+ if (recipientsVector) {
+ Module.ccall(
+ 'js_freeSparkSpendRecipientsVector',
+ null,
+ ['number'],
+ [recipientsVector],
+ );
+ }
+ if (coinsList) {
+ Module.ccall('js_freeSparkSpendCoinsList', null, ['number'], [coinsList]);
+ }
+ if (coverSetDataMap && coverSetDataMap !== 0) {
+ Module.ccall(
+ 'js_freeCoverSetDataMapForCreateSparkSpendTransaction',
+ null,
+ ['number'],
+ [coverSetDataMap],
+ );
+ }
+ if (privateRecipientsVector) {
+ Module.ccall(
+ 'js_freeSparkSpendPrivateRecipientsVector',
+ null,
+ ['number'],
+ [privateRecipientsVector],
+ );
+ }
+ if (idAndBlockHashesMap) {
+ Module.ccall(
+ 'js_freeIdAndBlockHashesMap',
+ null,
+ ['number'],
+ [idAndBlockHashesMap],
+ );
+ }
+ if (deserializedCoinObj) {
+ Module._free(deserializedCoinObj);
+ }
+ if (result && result !== 0) {
+ Module.ccall('js_freeCreateSparkSpendTxResult', null, ['number'], [result]);
+ }
+ if (coverSetData && coverSetData !== 0) {
+ Module.ccall('js_freeCoverSetData', null, ['number'], [coverSetData]);
+ }
+
+ return finalTx;
+}
diff --git a/packages/extension/src/libs/spark-handler/serializeMintContext.ts b/packages/extension/src/libs/spark-handler/serializeMintContext.ts
new file mode 100644
index 000000000..f234741c5
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/serializeMintContext.ts
@@ -0,0 +1,51 @@
+// Assuming DartInputData has the following structure
+interface DartInputData {
+ txHash: Buffer; // txHash is a buffer (equivalent to a byte array in C++)
+ txHashLength: number;
+ vout: number;
+}
+
+// Serialized result interface
+interface SerializedMintContextResult {
+ contextLength: number;
+ context: Buffer;
+}
+
+export const serializeMintContext = (
+ inputs: DartInputData[],
+): SerializedMintContextResult => {
+ const serialContextStream: Buffer[] = [];
+
+ for (let i = 0; i < inputs.length; i++) {
+ const txHashBuffer = inputs[i].txHash.slice(0, inputs[i].txHashLength);
+
+ // Manually serialize the input (txHash, vout, scriptSig, sequence)
+ const voutBuffer = Buffer.alloc(4);
+ voutBuffer.writeUInt32LE(inputs[i].vout, 0); // vout is an unsigned 32-bit integer
+
+ const sequenceBuffer = Buffer.alloc(4);
+ sequenceBuffer.writeUInt32LE(0xffffffff - 1, 0); // Max sequence number - 1
+
+ // Serialize the input as a concatenation of txHash + vout + scriptSig + sequence
+ const inputBuffer = Buffer.concat([
+ txHashBuffer,
+ voutBuffer,
+ Buffer.from([]),
+ sequenceBuffer,
+ ]);
+
+ // Append the serialized input to the context stream
+ serialContextStream.push(inputBuffer);
+ }
+
+ // Combine all buffers into one final buffer
+ const contextBuffer = Buffer.concat(serialContextStream);
+
+ // Return the result with context length and serialized context
+ const result: SerializedMintContextResult = {
+ contextLength: contextBuffer.length,
+ context: contextBuffer,
+ };
+
+ return result;
+};
diff --git a/packages/extension/src/libs/spark-handler/utils.ts b/packages/extension/src/libs/spark-handler/utils.ts
new file mode 100644
index 000000000..4d9fdce62
--- /dev/null
+++ b/packages/extension/src/libs/spark-handler/utils.ts
@@ -0,0 +1,23 @@
+export const numberToReversedHex = (num: number) => {
+ let hex = num.toString(16);
+
+ if (hex.length % 2 !== 0) {
+ hex = '0' + hex;
+ }
+
+ const bytes = hex.match(/.{2}/g);
+ const reversed = bytes?.reverse();
+
+ return reversed?.join('');
+};
+
+export const base64ToReversedHex = (base64: string): string => {
+ const binary = atob(base64);
+ const bytes = new Uint8Array(binary.length);
+
+ for (let i = 0; i < binary.length; i++) {
+ bytes[i] = binary.charCodeAt(i);
+ }
+ const reversed = Array.from(bytes).reverse();
+ return reversed.map(b => b.toString(16).padStart(2, '0')).join('');
+};
diff --git a/packages/extension/src/libs/utils/initialize-wallet.ts b/packages/extension/src/libs/utils/initialize-wallet.ts
index 8456ad8a3..0f1f2ae3b 100644
--- a/packages/extension/src/libs/utils/initialize-wallet.ts
+++ b/packages/extension/src/libs/utils/initialize-wallet.ts
@@ -27,6 +27,16 @@ export const initAccounts = async (keyring: KeyRing) => {
const ed25519massa = (
await getAccountsByNetworkName(NetworkNames.Massa)
).filter(acc => !acc.isTestWallet);
+ const secp256k1Firo = (
+ await getAccountsByNetworkName(NetworkNames.Firo)
+ ).filter(acc => !acc.isTestWallet);
+ if (secp256k1Firo.length == 0)
+ await keyring.saveNewAccount({
+ basePath: BitcoinNetworks.firo.basePath,
+ name: 'Firo Account 1',
+ signerType: BitcoinNetworks.firo.signer[0],
+ walletType: WalletType.mnemonic,
+ });
if (secp256k1.length == 0)
await keyring.saveNewAccount({
basePath: EthereumNetworks.ethereum.basePath,
diff --git a/packages/extension/src/libs/utils/number-formatter.ts b/packages/extension/src/libs/utils/number-formatter.ts
index 07e4812b3..5af2e7d89 100644
--- a/packages/extension/src/libs/utils/number-formatter.ts
+++ b/packages/extension/src/libs/utils/number-formatter.ts
@@ -313,6 +313,7 @@ const formatPercentageValue = (
_value: BigNumber | string | number,
): FormattedValue => {
/* Strip '%' if necessary */
+ // @ts-ignore
const value = new BigNumber(_value.toString().replaceAll('%', ''));
const unit = FormattedNumberUnit.PERCENT;
/**
diff --git a/packages/extension/src/libs/utils/updateAndSync/updateCoinSet.ts b/packages/extension/src/libs/utils/updateAndSync/updateCoinSet.ts
new file mode 100644
index 000000000..bd229eb8e
--- /dev/null
+++ b/packages/extension/src/libs/utils/updateAndSync/updateCoinSet.ts
@@ -0,0 +1,248 @@
+import { DB_DATA_KEYS, IndexedDBHelper } from '@action/db/indexedDB';
+import { PublicFiroWallet } from '@/providers/bitcoin/libs/firo-wallet/public-firo-wallet';
+import type { AnonymitySetMetaModel } from '@/providers/bitcoin/libs/electrum-client/abstract-electrum';
+import { differenceSets } from '@action/utils/set-utils';
+
+export type StoredAnonymitySet = {
+ coins: string[][];
+ blockHash: string;
+ setHash: string;
+};
+
+export type StoredCoin = {
+ coin: string[];
+ setId?: number;
+ tag?: string;
+};
+
+export type CoinSetUpdateResult = {
+ setId: number;
+ blockHash: string;
+ setHash: string;
+ newCoins: string[][];
+ containsMyCoins: boolean;
+ matchedCoins: string[][];
+};
+
+export type CoinSetSyncOptions = {
+ intervalMs?: number;
+ onUpdate?: (results: CoinSetUpdateResult[]) => void;
+ onMyCoinDetected?: (result: CoinSetUpdateResult) => void;
+ onError?: (error: unknown) => void;
+ onComplete?: () => void;
+};
+
+const wallet = new PublicFiroWallet();
+const db = new IndexedDBHelper();
+
+const getLocalSets = async (): Promise<StoredAnonymitySet[]> => {
+ try {
+ const data =
+      (await db.readData<StoredAnonymitySet[]>(DB_DATA_KEYS.sets)) ?? [];
+ if (Array.isArray(data)) {
+ return data;
+ } else {
+ return [];
+ }
+ } catch (error) {
+ console.warn(
+ 'updateCoinSet:getLocalSets',
+ 'Failed to read local sets',
+ error,
+ );
+ return [];
+ }
+};
+
+const getMyCoinHashes = async (): Promise<Set<string>> => {
+ try {
+ const myCoins = await db.readData('myCoins');
+ if (!Array.isArray(myCoins)) {
+ return new Set();
+ }
+ return new Set(
+ myCoins
+ .map(entry => entry?.coin?.[0])
+ .filter(
+ (hash): hash is string => typeof hash === 'string' && hash.length > 0,
+ ),
+ );
+ } catch (error) {
+ console.warn(
+ 'updateCoinSet:getMyCoinHashes',
+ 'Failed to read myCoins',
+ error,
+ );
+ return new Set();
+ }
+};
+
+const fetchNewCoinsForSet = async (
+ setId: number,
+ meta: AnonymitySetMetaModel,
+ localSet: StoredAnonymitySet | undefined,
+): Promise<{ coins: string[][]; isFullReplacement: boolean }> => {
+ const localCoinsCount = localSet?.coins?.length ?? 0;
+
+ if (!localSet || meta.size <= localCoinsCount + 1) {
+ const [firstChunk, secondChunk] = await Promise.all([
+ wallet.fetchAnonymitySetSector(
+ setId,
+ meta.blockHash,
+ 0,
+ Math.floor(meta.size / 2),
+ ),
+ wallet.fetchAnonymitySetSector(
+ setId,
+ meta.blockHash,
+ Math.floor(meta.size / 2),
+ meta.size,
+ ),
+ ]);
+ const coins = [...firstChunk.coins, ...secondChunk.coins];
+ return { coins, isFullReplacement: true };
+ }
+
+ const sector = await wallet.fetchAnonymitySetSector(
+ setId,
+ meta.blockHash,
+ localCoinsCount,
+ meta.size,
+ );
+
+ const coins = sector.coins ?? [];
+ return { coins, isFullReplacement: false };
+};
+
+export const syncCoinSetsOnce = async (): Promise<CoinSetUpdateResult[]> => {
+ const [remoteMetas, localSets, myCoinHashes] = await Promise.all([
+ wallet.getAllSparkAnonymitySetMeta(),
+ getLocalSets(),
+ getMyCoinHashes(),
+ ]);
+
+ if (!Array.isArray(remoteMetas) || remoteMetas.length === 0) {
+ return [];
+ }
+
+ const updatesList = await Promise.all(
+ remoteMetas.map(async (remoteMeta, index) => {
+ const setId = index + 1;
+ const localSet = localSets[index];
+
+ const hasLocalSet = Boolean(localSet);
+ const localCoinCount = localSet?.coins?.length ?? 0;
+
+ if (hasLocalSet) {
+ const hashesMatch = remoteMeta.setHash === localSet!.setHash;
+ const sizesMatch = remoteMeta.size === localCoinCount;
+
+ if (hashesMatch && sizesMatch) {
+ return null;
+ }
+ }
+
+ const { coins: newCoins, isFullReplacement } = await fetchNewCoinsForSet(
+ setId,
+ remoteMeta,
+ localSet,
+ );
+
+ if (!newCoins.length) {
+ return null;
+ }
+
+ const updatedCoinsSet = differenceSets(
+ new Set(localSets?.[index]?.coins ?? []),
+ new Set(newCoins),
+ );
+ localSets[index] = {
+ blockHash: remoteMeta.blockHash,
+ setHash: remoteMeta.setHash,
+ coins: Array.from(updatedCoinsSet),
+ };
+
+ if (!localSets[index] || isFullReplacement) {
+ await db.saveData(DB_DATA_KEYS.sets, localSets);
+ } else {
+ await db.appendSetData(DB_DATA_KEYS.sets, index, {
+ ...localSets[index],
+ });
+ }
+
+ const matchedCoins = newCoins.filter(coin =>
+ myCoinHashes.has(coin?.[0] ?? ''),
+ );
+
+ return {
+ setId,
+ blockHash: remoteMeta.blockHash,
+ setHash: remoteMeta.setHash,
+ newCoins,
+ containsMyCoins: matchedCoins.length > 0,
+ matchedCoins,
+ };
+ }),
+ );
+
+ return updatesList.filter(Boolean) as CoinSetUpdateResult[];
+};
+
+export const startCoinSetSync = (options?: CoinSetSyncOptions) => {
+ const intervalMs = options?.intervalMs ?? 60_000;
+ let stopped = false;
+  let timer: ReturnType<typeof setTimeout> | null = null;
+ let isRunning = false;
+ let hasCompletedOnce = false;
+
+ const fireCompleteOnce = () => {
+ if (!hasCompletedOnce) {
+ hasCompletedOnce = true;
+ options?.onComplete?.();
+ }
+ };
+
+ const scheduleNext = () => {
+ if (stopped) return;
+ timer = setTimeout(run, intervalMs);
+ };
+
+ const run = async () => {
+ if (isRunning) {
+ scheduleNext();
+ return;
+ }
+
+ isRunning = true;
+ try {
+ const updates = await syncCoinSetsOnce();
+
+ if (updates.length) {
+ options?.onUpdate?.(updates);
+
+ updates
+ .filter(update => update.containsMyCoins)
+ .forEach(update => options?.onMyCoinDetected?.(update));
+ }
+
+ fireCompleteOnce();
+ } catch (error) {
+ options?.onError?.(error);
+
+ fireCompleteOnce();
+ } finally {
+ isRunning = false;
+ scheduleNext();
+ }
+ };
+
+ run();
+
+ return () => {
+ stopped = true;
+ if (timer) {
+ clearTimeout(timer);
+ timer = null;
+ }
+ };
+};
diff --git a/packages/extension/src/libs/utils/updateAndSync/updateTagsSet.ts b/packages/extension/src/libs/utils/updateAndSync/updateTagsSet.ts
new file mode 100644
index 000000000..b1cf846dd
--- /dev/null
+++ b/packages/extension/src/libs/utils/updateAndSync/updateTagsSet.ts
@@ -0,0 +1,104 @@
+import { DB_DATA_KEYS, IndexedDBHelper } from '@action/db/indexedDB';
+import { PublicFiroWallet } from '@/providers/bitcoin/libs/firo-wallet/public-firo-wallet';
+import { differenceSets } from '@action/utils/set-utils';
+
+type SetsUpdateResult = {
+ tags: string[];
+};
+
+export type TagsSyncOptions = {
+ intervalMs?: number;
+ onUpdate?: (results: SetsUpdateResult) => void;
+ onError?: (error: unknown) => void;
+ onComplete?: () => void;
+};
+
+const wallet = new PublicFiroWallet();
+const db = new IndexedDBHelper();
+
+const syncTagsOnce = async (): Promise<SetsUpdateResult> => {
+ try {
+ const localTags = await db.readData<{ tags: string[] }>(
+ DB_DATA_KEYS.usedCoinsTags,
+ );
+
+ const updates = await wallet.getUsedSparkCoinsTags(
+ !!localTags?.tags?.length ? localTags?.tags?.length : 0,
+ );
+
+ const diffTags = differenceSets(
+ new Set(updates?.tags ?? []),
+ new Set(localTags?.tags ?? []),
+ );
+
+ const mergedTags = Array.from(
+ new Set([...(diffTags.values() ?? []), ...(updates?.tags ?? [])]),
+ );
+ await db.saveData(DB_DATA_KEYS.usedCoinsTags, { tags: mergedTags });
+
+ // Prevent sending updates if there are no new tags
+ if (mergedTags.length === localTags?.tags?.length) {
+ return { tags: [] };
+ }
+
+ return updates;
+ } catch (error) {
+ throw error;
+ }
+};
+
+export const startTagSetSync = (options?: TagsSyncOptions) => {
+ const intervalMs = options?.intervalMs ?? 60_000;
+ let stopped = false;
+  let timer: ReturnType<typeof setTimeout> | null = null;
+ let isRunning = false;
+ let hasCompletedOnce = false;
+
+ const fireCompleteOnce = () => {
+ if (!hasCompletedOnce) {
+ hasCompletedOnce = true;
+ options?.onComplete?.();
+ }
+ };
+
+ const scheduleNext = () => {
+ if (stopped) return;
+ timer = setTimeout(run, intervalMs);
+ };
+
+ const run = async () => {
+ if (isRunning) {
+ scheduleNext();
+ return;
+ }
+
+ isRunning = true;
+ try {
+ const updates = await syncTagsOnce();
+
+ if (updates?.tags?.length) {
+ options?.onUpdate?.(updates);
+ } else {
+ }
+
+ fireCompleteOnce();
+ } catch (error) {
+ options?.onError?.(error);
+
+ fireCompleteOnce();
+ } finally {
+ isRunning = false;
+ scheduleNext();
+ }
+ };
+
+ run();
+
+ return () => {
+ stopped = true;
+ if (timer) {
+ clearTimeout(timer);
+ timer = null;
+ }
+ };
+};
diff --git a/packages/extension/src/libs/utils/wasm-loader.ts b/packages/extension/src/libs/utils/wasm-loader.ts
new file mode 100644
index 000000000..9225dd417
--- /dev/null
+++ b/packages/extension/src/libs/utils/wasm-loader.ts
@@ -0,0 +1,70 @@
+import initSpark from './wasmModule/spark.js';
+
+export declare const Module: any;
+
+declare global {
+ interface WasmModule {
+ ccall: (
+ name: string,
+ returnType: any,
+ argumentTypes: string[],
+ args: any[],
+ ) => any;
+ _free: (d: any) => any;
+ _malloc: (d: number) => any;
+ _js_createMintedCoinData: (...args: any) => any;
+ _js_createSparkMintRecipients: (...args: any) => any;
+ _js_getRecipientVectorLength: (...args: any) => any;
+ _js_getRecipientAt: (...args: any) => any;
+ _js_getRecipientScriptPubKeySize: (...args: any) => any;
+ _js_getRecipientScriptPubKey: (...args: any) => any;
+ _js_getRecipientAmount: (...args: any) => any;
+ _js_getRecipientSubtractFeeFromAmountFlag: (...args: any) => any;
+ UTF8ToString: (...args: any) => any;
+ HEAPU8: Uint8Array;
+ HEAP8: Int8Array;
+ HEAP16: Int16Array;
+ HEAPU16: Uint16Array;
+ HEAP32: Int32Array;
+ HEAPU32: Uint32Array;
+ HEAPF32: Float32Array;
+ }
+}
+
+async function loadWasm(): Promise<WasmModule> {
+ return new Promise(async (resolve, reject) => {
+ const wasmModule = await initSpark();
+
+ if (typeof wasmModule !== 'undefined') {
+ resolve(wasmModule);
+ } else {
+ reject(new Error('Failed to load WASM module.'));
+ }
+ });
+}
+
+class WasmInstance {
+ instance: WasmModule | null = null;
+
+ constructor() {
+ loadWasm()
+ .then(module => {
+ this.instance = module;
+ })
+ .catch(error => {
+ console.error('Error loading WASM module:', error);
+ });
+ }
+
+  public async getInstance(): Promise<WasmModule> {
+ if (this.instance) {
+ return Promise.resolve(this.instance);
+ } else {
+ const wasm = await loadWasm();
+ this.instance = wasm;
+ return Promise.resolve(wasm);
+ }
+ }
+}
+
+export const wasmInstance = new WasmInstance();
diff --git a/packages/extension/src/libs/utils/wasm-worker-loader.ts b/packages/extension/src/libs/utils/wasm-worker-loader.ts
new file mode 100644
index 000000000..a8bc658f2
--- /dev/null
+++ b/packages/extension/src/libs/utils/wasm-worker-loader.ts
@@ -0,0 +1,70 @@
+import initSpark from './wasmWorkerModule/spark.js';
+
+export declare const Module: any;
+
+declare global {
+ interface WasmModule {
+ ccall: (
+ name: string,
+ returnType: any,
+ argumentTypes: string[],
+ args: any[],
+ ) => any;
+ _free: (d: any) => any;
+ _malloc: (d: number) => any;
+ _js_createMintedCoinData: (...args: any) => any;
+ _js_createSparkMintRecipients: (...args: any) => any;
+ _js_getRecipientVectorLength: (...args: any) => any;
+ _js_getRecipientAt: (...args: any) => any;
+ _js_getRecipientScriptPubKeySize: (...args: any) => any;
+ _js_getRecipientScriptPubKey: (...args: any) => any;
+ _js_getRecipientAmount: (...args: any) => any;
+ _js_getRecipientSubtractFeeFromAmountFlag: (...args: any) => any;
+ UTF8ToString: (...args: any) => any;
+ HEAPU8: Uint8Array;
+ HEAP8: Int8Array;
+ HEAP16: Int16Array;
+ HEAPU16: Uint16Array;
+ HEAP32: Int32Array;
+ HEAPU32: Uint32Array;
+ HEAPF32: Float32Array;
+ }
+}
+
+async function loadWasm(): Promise<WasmModule> {
+ return new Promise(async (resolve, reject) => {
+ const wasmModule = await initSpark();
+
+ if (typeof wasmModule !== 'undefined') {
+ resolve(wasmModule);
+ } else {
+ reject(new Error('Failed to load WASM module.'));
+ }
+ });
+}
+
+class WasmWorkerInstance {
+ instance: WasmModule | null = null;
+
+ constructor() {
+ loadWasm()
+ .then(module => {
+ this.instance = module;
+ })
+ .catch(error => {
+ console.error('Error loading WASM module:', error);
+ });
+ }
+
+  public async getInstance(): Promise<WasmModule> {
+ if (this.instance) {
+ return Promise.resolve(this.instance);
+ } else {
+ const wasm = await loadWasm();
+ this.instance = wasm;
+ return Promise.resolve(wasm);
+ }
+ }
+}
+
+export const wasmWorkerInstance = new WasmWorkerInstance();
diff --git a/packages/extension/src/libs/utils/wasmModule/spark.js b/packages/extension/src/libs/utils/wasmModule/spark.js
new file mode 100644
index 000000000..bd2f84f1d
--- /dev/null
+++ b/packages/extension/src/libs/utils/wasmModule/spark.js
@@ -0,0 +1,4877 @@
+var Module = (() => {
+ var _scriptName = import.meta.url;
+
+ return (
+async function(moduleArg = {}) {
+ var moduleRtn;
+
+// include: shell.js
+// The Module object: Our interface to the outside world. We import
+// and export values on it. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(moduleArg) => Promise
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to check if Module already exists (e.g. case 3 above).
+// Substitution will be replaced with actual code on later stage of the build,
+// this way Closure Compiler will not mangle it (e.g. case 4. above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module = moduleArg;
+
+// Set up the promise that indicates the Module is initialized
+var readyPromiseResolve, readyPromiseReject;
+var readyPromise = new Promise((resolve, reject) => {
+ readyPromiseResolve = resolve;
+ readyPromiseReject = reject;
+});
+
+// Determine the runtime environment we are in. You can customize this by
+// setting the ENVIRONMENT setting at compile time (see settings.js).
+
+// Attempt to auto-detect the environment
+var ENVIRONMENT_IS_WEB = typeof window == 'object';
+var ENVIRONMENT_IS_WORKER = typeof WorkerGlobalScope != 'undefined';
+// N.b. Electron.js environment is simultaneously a NODE-environment, but
+// also a web environment.
+var ENVIRONMENT_IS_NODE = typeof process == 'object' && typeof process.versions == 'object' && typeof process.versions.node == 'string' && process.type != 'renderer';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // When building an ES module `require` is not normally available.
+ // We need to use `createRequire()` to construct the require()` function.
+ const { createRequire } = await import('module');
+ /** @suppress{duplicate} */
+ var require = createRequire('/');
+
+}
+
+// --pre-jses are emitted after the Module integration code, so that they can
+// refer to Module (if they choose; they can also define Module)
+
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = Object.assign({}, Module);
+
+var arguments_ = [];
+var thisProgram = './this.program';
+var quit_ = (status, toThrow) => {
+ throw toThrow;
+};
+
+// `/` should be present at the end if `scriptDirectory` is not empty
+var scriptDirectory = '';
+function locateFile(path) {
+ if (Module['locateFile']) {
+ return Module['locateFile'](path, scriptDirectory);
+ }
+ return scriptDirectory + path;
+}
+
+// Hooks that are implemented differently in different runtime environments.
+var readAsync, readBinary;
+
+if (ENVIRONMENT_IS_NODE) {
+
+ // These modules will usually be used on Node.js. Load them eagerly to avoid
+ // the complexity of lazy-loading.
+ var fs = require('fs');
+ var nodePath = require('path');
+
+ // EXPORT_ES6 + ENVIRONMENT_IS_NODE always requires use of import.meta.url,
+ // since there's no way getting the current absolute path of the module when
+ // support for that is not available.
+ if (!import.meta.url.startsWith('data:')) {
+ scriptDirectory = nodePath.dirname(require('url').fileURLToPath(import.meta.url)) + '/';
+ }
+
+// include: node_shell_read.js
+readBinary = (filename) => {
+ // We need to re-wrap `file://` strings to URLs.
+ filename = isFileURI(filename) ? new URL(filename) : filename;
+ var ret = fs.readFileSync(filename);
+ return ret;
+};
+
+readAsync = async (filename, binary = true) => {
+ // See the comment in the `readBinary` function.
+ filename = isFileURI(filename) ? new URL(filename) : filename;
+ var ret = fs.readFileSync(filename, binary ? undefined : 'utf8');
+ return ret;
+};
+// end include: node_shell_read.js
+ if (!Module['thisProgram'] && process.argv.length > 1) {
+ thisProgram = process.argv[1].replace(/\\/g, '/');
+ }
+
+ arguments_ = process.argv.slice(2);
+
+ // MODULARIZE will export the module in the proper place outside, we don't need to export here
+
+ quit_ = (status, toThrow) => {
+ process.exitCode = status;
+ throw toThrow;
+ };
+
+} else
+
+// Note that this includes Node.js workers when relevant (pthreads is enabled).
+// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and
+// ENVIRONMENT_IS_NODE.
+if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ if (ENVIRONMENT_IS_WORKER) { // Check worker, not web, since window could be polyfilled
+ scriptDirectory = self.location.href;
+ } else if (typeof document != 'undefined' && document.currentScript) { // web
+ scriptDirectory = document.currentScript.src;
+ }
+ // When MODULARIZE, this JS may be executed later, after document.currentScript
+ // is gone, so we saved it, and we use it here instead of any other info.
+ if (_scriptName) {
+ scriptDirectory = _scriptName;
+ }
+ // blob urls look like blob:http://site.com/etc/etc and we cannot infer anything from them.
+ // otherwise, slice off the final part of the url to find the script directory.
+ // if scriptDirectory does not contain a slash, lastIndexOf will return -1,
+ // and scriptDirectory will correctly be replaced with an empty string.
+ // If scriptDirectory contains a query (starting with ?) or a fragment (starting with #),
+ // they are removed because they could contain a slash.
+ if (scriptDirectory.startsWith('blob:')) {
+ scriptDirectory = '';
+ } else {
+ scriptDirectory = scriptDirectory.substr(0, scriptDirectory.replace(/[?#].*/, '').lastIndexOf('/')+1);
+ }
+
+ {
+// include: web_or_worker_shell_read.js
+if (ENVIRONMENT_IS_WORKER) {
+ readBinary = (url) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.responseType = 'arraybuffer';
+ xhr.send(null);
+ return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response));
+ };
+ }
+
+ readAsync = async (url) => {
+ // Fetch has some additional restrictions over XHR, like it can't be used on a file:// url.
+ // See https://github.com/github/fetch/pull/92#issuecomment-140665932
+ // Cordova or Electron apps are typically loaded from a file:// url.
+ // So use XHR on webview if URL is a file URL.
+ if (isFileURI(url)) {
+ return new Promise((resolve, reject) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = () => {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ resolve(xhr.response);
+ return;
+ }
+ reject(xhr.status);
+ };
+ xhr.onerror = reject;
+ xhr.send(null);
+ });
+ }
+ var response = await fetch(url, { credentials: 'same-origin' });
+ if (response.ok) {
+ return response.arrayBuffer();
+ }
+ throw new Error(response.status + ' : ' + response.url);
+ };
+// end include: web_or_worker_shell_read.js
+ }
+} else
+{
+}
+
+var out = Module['print'] || console.log.bind(console);
+var err = Module['printErr'] || console.error.bind(console);
+
+// Merge back in the overrides
+Object.assign(Module, moduleOverrides);
+// Free the object hierarchy contained in the overrides, this lets the GC
+// reclaim data used.
+moduleOverrides = null;
+
+// Emit code to handle expected values on the Module object. This applies Module.x
+// to the proper local x. This has two benefits: first, we only emit it if it is
+// expected to arrive, and second, by using a local everywhere else that can be
+// minified.
+
+if (Module['arguments']) arguments_ = Module['arguments'];
+
+if (Module['thisProgram']) thisProgram = Module['thisProgram'];
+
+// perform assertions in shell.js after we set up out() and err(), as otherwise if an assertion fails it cannot print the message
+// end include: shell.js
+
+// include: preamble.js
+// === Preamble library stuff ===
+
+// Documentation for the public APIs defined in this file must be updated in:
+// site/source/docs/api_reference/preamble.js.rst
+// A prebuilt local version of the documentation is available at:
+// site/build/text/docs/api_reference/preamble.js.txt
+// You can also build docs locally as HTML or other formats in site/
+// An online HTML version (which may be of a different version of Emscripten)
+// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html
+
+var wasmBinary = Module['wasmBinary'];
+
+// Wasm globals
+
+var wasmMemory;
+
+//========================================
+// Runtime essentials
+//========================================
+
+// whether we are quitting the application. no code should run after this.
+// set in exit() and abort()
+var ABORT = false;
+
+// set by exit() and abort(). Passed to 'onExit' handler.
+// NOTE: This is also used as the process return code code in shell environments
+// but only when noExitRuntime is false.
+var EXITSTATUS;
+
+// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we
+// don't define it at all in release modes. This matches the behaviour of
+// MINIMAL_RUNTIME.
+// TODO(sbc): Make this the default even without STRICT enabled.
+/** @type {function(*, string=)} */
+function assert(condition, text) {
+ if (!condition) {
+ // This build was created without ASSERTIONS defined. `assert()` should not
+ // ever be called in this configuration but in case there are callers in
+ // the wild leave this simple abort() implementation here for now.
+ abort(text);
+ }
+}
+
+// Memory management
+
+var HEAP,
+/** @type {!Int8Array} */
+ HEAP8,
+/** @type {!Uint8Array} */
+ HEAPU8,
+/** @type {!Int16Array} */
+ HEAP16,
+/** @type {!Uint16Array} */
+ HEAPU16,
+/** @type {!Int32Array} */
+ HEAP32,
+/** @type {!Uint32Array} */
+ HEAPU32,
+/** @type {!Float32Array} */
+ HEAPF32,
+/* BigInt64Array type is not correctly defined in closure
+/** not-@type {!BigInt64Array} */
+ HEAP64,
+/* BigUint64Array type is not correctly defined in closure
+/** not-t@type {!BigUint64Array} */
+ HEAPU64,
+/** @type {!Float64Array} */
+ HEAPF64;
+
+var runtimeInitialized = false;
+
+// include: URIUtils.js
+// Prefix of data URIs emitted by SINGLE_FILE and related options.
+var dataURIPrefix = 'data:application/octet-stream;base64,';
+
+/**
+ * Indicates whether filename is a base64 data URI.
+ * @noinline
+ */
+var isDataURI = (filename) => filename.startsWith(dataURIPrefix);
+
+/**
+ * Indicates whether filename is delivered via file protocol (as opposed to http/https)
+ * @noinline
+ */
+var isFileURI = (filename) => filename.startsWith('file://');
+// end include: URIUtils.js
+// include: runtime_shared.js
+// include: runtime_stack_check.js
+// end include: runtime_stack_check.js
+// include: runtime_exceptions.js
+// end include: runtime_exceptions.js
+// include: runtime_debug.js
+// end include: runtime_debug.js
+// include: memoryprofiler.js
+// end include: memoryprofiler.js
+
+
+function updateMemoryViews() {
+ var b = wasmMemory.buffer;
+ Module['HEAP8'] = HEAP8 = new Int8Array(b);
+ Module['HEAP16'] = HEAP16 = new Int16Array(b);
+ Module['HEAPU8'] = HEAPU8 = new Uint8Array(b);
+ Module['HEAPU16'] = HEAPU16 = new Uint16Array(b);
+ Module['HEAP32'] = HEAP32 = new Int32Array(b);
+ Module['HEAPU32'] = HEAPU32 = new Uint32Array(b);
+ Module['HEAPF32'] = HEAPF32 = new Float32Array(b);
+ Module['HEAPF64'] = HEAPF64 = new Float64Array(b);
+ Module['HEAP64'] = HEAP64 = new BigInt64Array(b);
+ Module['HEAPU64'] = HEAPU64 = new BigUint64Array(b);
+}
+
+// end include: runtime_shared.js
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the main() is called
+
+function preRun() {
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function initRuntime() {
+ runtimeInitialized = true;
+
+
+if (!Module['noFSInit'] && !FS.initialized)
+ FS.init();
+FS.ignorePermissions = false;
+
+TTY.init();
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function postRun() {
+
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+
+function addOnExit(cb) {
+}
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// Module.preRun (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function getUniqueRunDependency(id) {
+ return id;
+}
+
+function addRunDependency(id) {
+ runDependencies++;
+
+ Module['monitorRunDependencies']?.(runDependencies);
+
+}
+
+function removeRunDependency(id) {
+ runDependencies--;
+
+ Module['monitorRunDependencies']?.(runDependencies);
+
+ if (runDependencies == 0) {
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+
+/** @param {string|number=} what */
+function abort(what) {
+ Module['onAbort']?.(what);
+
+ what = 'Aborted(' + what + ')';
+ // TODO(sbc): Should we remove printing and leave it up to whoever
+ // catches the exception?
+ err(what);
+
+ ABORT = true;
+
+ what += '. Build with -sASSERTIONS for more info.';
+
+ // Use a wasm runtime error, because a JS error might be seen as a foreign
+ // exception, which means we'd run destructors on it. We need the error to
+ // simply make the program stop.
+ // FIXME This approach does not work in Wasm EH because it currently does not assume
+ // all RuntimeErrors are from traps; it decides whether a RuntimeError is from
+ // a trap or not based on a hidden field within the object. So at the moment
+ // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that
+ // allows this in the wasm spec.
+
+ // Suppress closure compiler warning here. Closure compiler's builtin extern
+ // definition for WebAssembly.RuntimeError claims it takes no arguments even
+ // though it can.
+ // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed.
+ /** @suppress {checkTypes} */
+ var e = new WebAssembly.RuntimeError('WASM ERROR: ' + what, 'unknown file', 0);
+
+ readyPromiseReject(e);
+ // Throw the error whether or not MODULARIZE is set because abort is used
+ // in code paths apart from instantiation where an exception is expected
+ // to be thrown when abort is called.
+ throw e;
+}
+
+var wasmBinaryFile;
+function findWasmBinary() {
+ if (Module['locateFile']) {
+ var f = 'spark.wasm';
+ if (!isDataURI(f)) {
+ return locateFile(f);
+ }
+ return f;
+ }
+ // Use bundler-friendly `new URL(..., import.meta.url)` pattern; works in browsers too.
+ return new URL('spark.wasm', import.meta.url).href;
+}
+
+function getBinarySync(file) {
+ if (file == wasmBinaryFile && wasmBinary) {
+ return new Uint8Array(wasmBinary);
+ }
+ if (readBinary) {
+ return readBinary(file);
+ }
+ throw 'both async and sync fetching of the wasm failed';
+}
+
+async function getWasmBinary(binaryFile) {
+ // If we don't have the binary yet, load it asynchronously using readAsync.
+ if (!wasmBinary
+ ) {
+ // Fetch the binary using readAsync
+ try {
+ var response = await readAsync(binaryFile);
+ return new Uint8Array(response);
+ } catch {
+ // Fall back to getBinarySync below;
+ }
+ }
+
+ // Otherwise, getBinarySync should be able to get it synchronously
+ return getBinarySync(binaryFile);
+}
+
+async function instantiateArrayBuffer(binaryFile, imports) {
+ try {
+ var binary = await getWasmBinary(binaryFile);
+ var instance = await WebAssembly.instantiate(binary, imports);
+ return instance;
+ } catch (reason) {
+ err(`failed to asynchronously prepare wasm: ${reason}`);
+
+ abort(reason);
+ }
+}
+
+async function instantiateAsync(binary, binaryFile, imports) {
+ if (!binary &&
+ typeof WebAssembly.instantiateStreaming == 'function' &&
+ !isDataURI(binaryFile)
+ // Don't use streaming for file:// delivered objects in a webview, fetch them synchronously.
+ && !isFileURI(binaryFile)
+ // Avoid instantiateStreaming() on Node.js environment for now, as while
+ // Node.js v18.1.0 implements it, it does not have a full fetch()
+ // implementation yet.
+ //
+ // Reference:
+ // https://github.com/emscripten-core/emscripten/pull/16917
+ && !ENVIRONMENT_IS_NODE
+ ) {
+ try {
+ var response = fetch(binaryFile, { credentials: 'same-origin' });
+ var instantiationResult = await WebAssembly.instantiateStreaming(response, imports);
+ return instantiationResult;
+ } catch (reason) {
+ // We expect the most common failure cause to be a bad MIME type for the binary,
+ // in which case falling back to ArrayBuffer instantiation should work.
+ err(`wasm streaming compile failed: ${reason}`);
+ err('falling back to ArrayBuffer instantiation');
+ // fall back of instantiateArrayBuffer below
+ };
+ }
+ return instantiateArrayBuffer(binaryFile, imports);
+}
+
+function getWasmImports() {
+ // prepare imports
+ return {
+ 'env': wasmImports,
+ 'wasi_snapshot_preview1': wasmImports,
+ }
+}
+
+// Create the wasm instance.
+// Receives the wasm imports, returns the exports.
+async function createWasm() {
+ // Load the wasm module and create an instance of using native support in the JS engine.
+ // handle a generated wasm instance, receiving its exports and
+ // performing other necessary setup
+ /** @param {WebAssembly.Module=} module*/
+ function receiveInstance(instance, module) {
+ wasmExports = instance.exports;
+
+
+
+ wasmMemory = wasmExports['memory'];
+
+ updateMemoryViews();
+
+ wasmTable = wasmExports['__indirect_function_table'];
+
+
+ addOnInit(wasmExports['__wasm_call_ctors']);
+
+ removeRunDependency('wasm-instantiate');
+ return wasmExports;
+ }
+ // wait for the pthread pool (if any)
+ addRunDependency('wasm-instantiate');
+
+ // Prefer streaming instantiation if available.
+ function receiveInstantiationResult(result) {
+ // 'result' is a ResultObject object which has both the module and instance.
+ // receiveInstance() will swap in the exports (to Module.asm) so they can be called
+ // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line.
+ // When the regression is fixed, can restore the above PTHREADS-enabled path.
+ return receiveInstance(result['instance']);
+ }
+
+ var info = getWasmImports();
+
+ // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback
+ // to manually instantiate the Wasm module themselves. This allows pages to
+ // run the instantiation parallel to any other async startup actions they are
+ // performing.
+ // Also pthreads and wasm workers initialize the wasm instance through this
+ // path.
+ if (Module['instantiateWasm']) {
+ try {
+ return Module['instantiateWasm'](info, receiveInstance);
+ } catch(e) {
+ err(`Module.instantiateWasm callback failed with error: ${e}`);
+ // If instantiation fails, reject the module ready promise.
+ readyPromiseReject(e);
+ }
+ }
+
+ wasmBinaryFile ??= findWasmBinary();
+
+ try {
+ var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info);
+ var exports = receiveInstantiationResult(result);
+ return exports;
+ } catch (e) {
+ // If instantiation fails, reject the module ready promise.
+ readyPromiseReject(e);
+ return Promise.reject(e);
+ }
+}
+
+// === Body ===
+// end include: preamble.js
+
+
+ class ExitStatus {
+ name = 'ExitStatus';
+ constructor(status) {
+ this.message = `Program terminated with exit(${status})`;
+ this.status = status;
+ }
+ }
+
+ var callRuntimeCallbacks = (callbacks) => {
+ while (callbacks.length > 0) {
+ // Pass the module as the first argument.
+ callbacks.shift()(Module);
+ }
+ };
+
+
+ /**
+ * @param {number} ptr
+ * @param {string} type
+ */
+ function getValue(ptr, type = 'i8') {
+ if (type.endsWith('*')) type = '*';
+ switch (type) {
+ case 'i1': return HEAP8[ptr];
+ case 'i8': return HEAP8[ptr];
+ case 'i16': return HEAP16[((ptr)>>1)];
+ case 'i32': return HEAP32[((ptr)>>2)];
+ case 'i64': return HEAP64[((ptr)>>3)];
+ case 'float': return HEAPF32[((ptr)>>2)];
+ case 'double': return HEAPF64[((ptr)>>3)];
+ case '*': return HEAPU32[((ptr)>>2)];
+ default: abort(`invalid type for getValue: ${type}`);
+ }
+ }
+
+ var noExitRuntime = Module['noExitRuntime'] || true;
+
+
+ /**
+ * @param {number} ptr
+ * @param {number} value
+ * @param {string} type
+ */
+ function setValue(ptr, value, type = 'i8') {
+ if (type.endsWith('*')) type = '*';
+ switch (type) {
+ case 'i1': HEAP8[ptr] = value; break;
+ case 'i8': HEAP8[ptr] = value; break;
+ case 'i16': HEAP16[((ptr)>>1)] = value; break;
+ case 'i32': HEAP32[((ptr)>>2)] = value; break;
+ case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break;
+ case 'float': HEAPF32[((ptr)>>2)] = value; break;
+ case 'double': HEAPF64[((ptr)>>3)] = value; break;
+ case '*': HEAPU32[((ptr)>>2)] = value; break;
+ default: abort(`invalid type for setValue: ${type}`);
+ }
+ }
+
+ var stackRestore = (val) => __emscripten_stack_restore(val);
+
+ var stackSave = () => _emscripten_stack_get_current();
+
+ var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined;
+
+ /**
+ * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
+ * array that contains uint8 values, returns a copy of that string as a
+ * Javascript String object.
+ * heapOrArray is either a regular array, or a JavaScript typed array view.
+ * @param {number=} idx
+ * @param {number=} maxBytesToRead
+ * @return {string}
+ */
+ var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead = NaN) => {
+ var endIdx = idx + maxBytesToRead;
+ var endPtr = idx;
+ // TextDecoder needs to know the byte length in advance, it doesn't stop on
+ // null terminator by itself. Also, use the length info to avoid running tiny
+ // strings through TextDecoder, since .subarray() allocates garbage.
+ // (As a tiny code save trick, compare endPtr against endIdx using a negation,
+ // so that undefined/NaN means Infinity)
+ while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr;
+
+ if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) {
+ return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr));
+ }
+ var str = '';
+ // If building with TextDecoder, we have already computed the string length
+ // above, so test loop end condition against that
+ while (idx < endPtr) {
+ // For UTF8 byte structure, see:
+ // http://en.wikipedia.org/wiki/UTF-8#Description
+ // https://www.ietf.org/rfc/rfc2279.txt
+ // https://tools.ietf.org/html/rfc3629
+ var u0 = heapOrArray[idx++];
+ if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; }
+ var u1 = heapOrArray[idx++] & 63;
+ if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; }
+ var u2 = heapOrArray[idx++] & 63;
+ if ((u0 & 0xF0) == 0xE0) {
+ u0 = ((u0 & 15) << 12) | (u1 << 6) | u2;
+ } else {
+ u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63);
+ }
+
+ if (u0 < 0x10000) {
+ str += String.fromCharCode(u0);
+ } else {
+ var ch = u0 - 0x10000;
+ str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
+ }
+ }
+ return str;
+ };
+
+ /**
+ * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
+ * emscripten HEAP, returns a copy of that string as a Javascript String object.
+ *
+ * @param {number} ptr
+ * @param {number=} maxBytesToRead - An optional length that specifies the
+ * maximum number of bytes to read. You can omit this parameter to scan the
+ * string until the first 0 byte. If maxBytesToRead is passed, and the string
+ * at [ptr, ptr+maxBytesToReadr[ contains a null byte in the middle, then the
+ * string will cut short at that byte index (i.e. maxBytesToRead will not
+ * produce a string of exact length [ptr, ptr+maxBytesToRead[) N.B. mixing
+ * frequent uses of UTF8ToString() with and without maxBytesToRead may throw
+ * JS JIT optimizations off, so it is worth to consider consistently using one
+ * @return {string}
+ */
+ var UTF8ToString = (ptr, maxBytesToRead) => {
+ return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead) : '';
+ };
+ var ___assert_fail = (condition, filename, line, func) =>
+ abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + [filename ? UTF8ToString(filename) : 'unknown filename', line, func ? UTF8ToString(func) : 'unknown function']);
+
+ var exceptionCaught = [];
+
+
+
+ var uncaughtExceptionCount = 0;
+ var ___cxa_begin_catch = (ptr) => {
+ var info = new ExceptionInfo(ptr);
+ if (!info.get_caught()) {
+ info.set_caught(true);
+ uncaughtExceptionCount--;
+ }
+ info.set_rethrown(false);
+ exceptionCaught.push(info);
+ ___cxa_increment_exception_refcount(ptr);
+ return ___cxa_get_exception_ptr(ptr);
+ };
+
+
+ var exceptionLast = 0;
+
+
+ var ___cxa_end_catch = () => {
+ // Clear state flag.
+ _setThrew(0, 0);
+ // Call destructor if one is registered then clear it.
+ var info = exceptionCaught.pop();
+
+ ___cxa_decrement_exception_refcount(info.excPtr);
+ exceptionLast = 0; // XXX in decRef?
+ };
+
+
+ class ExceptionInfo {
+ // excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it.
+ constructor(excPtr) {
+ this.excPtr = excPtr;
+ this.ptr = excPtr - 24;
+ }
+
+ set_type(type) {
+ HEAPU32[(((this.ptr)+(4))>>2)] = type;
+ }
+
+ get_type() {
+ return HEAPU32[(((this.ptr)+(4))>>2)];
+ }
+
+ set_destructor(destructor) {
+ HEAPU32[(((this.ptr)+(8))>>2)] = destructor;
+ }
+
+ get_destructor() {
+ return HEAPU32[(((this.ptr)+(8))>>2)];
+ }
+
+ set_caught(caught) {
+ caught = caught ? 1 : 0;
+ HEAP8[(this.ptr)+(12)] = caught;
+ }
+
+ get_caught() {
+ return HEAP8[(this.ptr)+(12)] != 0;
+ }
+
+ set_rethrown(rethrown) {
+ rethrown = rethrown ? 1 : 0;
+ HEAP8[(this.ptr)+(13)] = rethrown;
+ }
+
+ get_rethrown() {
+ return HEAP8[(this.ptr)+(13)] != 0;
+ }
+
+ // Initialize native structure fields. Should be called once after allocated.
+ init(type, destructor) {
+ this.set_adjusted_ptr(0);
+ this.set_type(type);
+ this.set_destructor(destructor);
+ }
+
+ set_adjusted_ptr(adjustedPtr) {
+ HEAPU32[(((this.ptr)+(16))>>2)] = adjustedPtr;
+ }
+
+ get_adjusted_ptr() {
+ return HEAPU32[(((this.ptr)+(16))>>2)];
+ }
+ }
+
+ var ___resumeException = (ptr) => {
+ if (!exceptionLast) {
+ exceptionLast = ptr;
+ }
+ throw exceptionLast;
+ };
+
+
+ var setTempRet0 = (val) => __emscripten_tempret_set(val);
+ var findMatchingCatch = (args) => {
+ var thrown =
+ exceptionLast;
+ if (!thrown) {
+ // just pass through the null ptr
+ setTempRet0(0);
+ return 0;
+ }
+ var info = new ExceptionInfo(thrown);
+ info.set_adjusted_ptr(thrown);
+ var thrownType = info.get_type();
+ if (!thrownType) {
+ // just pass through the thrown ptr
+ setTempRet0(0);
+ return thrown;
+ }
+
+ // can_catch receives a **, add indirection
+ // The different catch blocks are denoted by different types.
+ // Due to inheritance, those types may not precisely match the
+ // type of the thrown object. Find one which matches, and
+ // return the type of the catch block which should be called.
+ for (var caughtType of args) {
+ if (caughtType === 0 || caughtType === thrownType) {
+ // Catch all clause matched or exactly the same type is caught
+ break;
+ }
+ var adjusted_ptr_addr = info.ptr + 16;
+ if (___cxa_can_catch(caughtType, thrownType, adjusted_ptr_addr)) {
+ setTempRet0(caughtType);
+ return thrown;
+ }
+ }
+ setTempRet0(thrownType);
+ return thrown;
+ };
+ var ___cxa_find_matching_catch_2 = () => findMatchingCatch([]);
+
+ var ___cxa_find_matching_catch_3 = (arg0) => findMatchingCatch([arg0]);
+
+
+
+ var ___cxa_rethrow = () => {
+ var info = exceptionCaught.pop();
+ if (!info) {
+ abort('no exception to throw');
+ }
+ var ptr = info.excPtr;
+ if (!info.get_rethrown()) {
+ // Only pop if the corresponding push was through rethrow_primary_exception
+ exceptionCaught.push(info);
+ info.set_rethrown(true);
+ info.set_caught(false);
+ uncaughtExceptionCount++;
+ }
+ exceptionLast = ptr;
+ throw exceptionLast;
+ };
+
+
+
+ var ___cxa_throw = (ptr, type, destructor) => {
+ var info = new ExceptionInfo(ptr);
+ // Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception.
+ info.init(type, destructor);
+ exceptionLast = ptr;
+ uncaughtExceptionCount++;
+ throw exceptionLast;
+ };
+
+ var ___cxa_uncaught_exceptions = () => uncaughtExceptionCount;
+
+
+ /** @suppress {duplicate } */
+ var syscallGetVarargI = () => {
+ // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number.
+ var ret = HEAP32[((+SYSCALLS.varargs)>>2)];
+ SYSCALLS.varargs += 4;
+ return ret;
+ };
+ var syscallGetVarargP = syscallGetVarargI;
+
+
+ var PATH = {
+ isAbs:(path) => path.charAt(0) === '/',
+ splitPath:(filename) => {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },
+ normalizeArray:(parts, allowAboveRoot) => {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up; up--) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },
+ normalize:(path) => {
+ var isAbsolute = PATH.isAbs(path),
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },
+ dirname:(path) => {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },
+ basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1],
+ join:(...paths) => PATH.normalize(paths.join('/')),
+ join2:(l, r) => PATH.normalize(l + '/' + r),
+ };
+
+ var initRandomFill = () => {
+ // This block is not needed on v19+ since crypto.getRandomValues is builtin
+ if (ENVIRONMENT_IS_NODE) {
+ var nodeCrypto = require('crypto');
+ return (view) => nodeCrypto.randomFillSync(view);
+ }
+
+ return (view) => crypto.getRandomValues(view);
+ };
+ var randomFill = (view) => {
+ // Lazily init on the first invocation.
+ (randomFill = initRandomFill())(view);
+ };
+
+
+
// Path utilities that may consult the filesystem (for the current working
// directory), unlike the purely lexical PATH helpers.
var PATH_FS = {
  resolve:(...args) => {
    // Resolve segments right-to-left until an absolute segment is found,
    // falling back to FS.cwd() at i === -1 — mirrors Node's path.resolve.
    var resolvedPath = '',
      resolvedAbsolute = false;
    for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) {
      var path = (i >= 0) ? args[i] : FS.cwd();
      // Skip empty and invalid entries
      if (typeof path != 'string') {
        throw new TypeError('Arguments to path.resolve must be strings');
      } else if (!path) {
        return ''; // an invalid portion invalidates the whole thing
      }
      resolvedPath = path + '/' + resolvedPath;
      resolvedAbsolute = PATH.isAbs(path);
    }
    // At this point the path should be resolved to a full absolute path, but
    // handle relative paths to be safe (might happen when process.cwd() fails)
    resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/');
    return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
  },
  relative:(from, to) => {
    // Relative path from `from` to `to`; both are resolved to absolute
    // paths first, then the leading '/' is stripped before splitting.
    from = PATH_FS.resolve(from).substr(1);
    to = PATH_FS.resolve(to).substr(1);
    function trim(arr) {
      // Drop empty strings from both ends. NOTE(review): the upper slice
      // bound is `end - start + 1` (inherited from old Node path.js); that
      // equals `end + 1` only when start === 0, which holds here because
      // resolve() filters out empty segments — confirm before reusing.
      var start = 0;
      for (; start < arr.length; start++) {
        if (arr[start] !== '') break;
      }
      var end = arr.length - 1;
      for (; end >= 0; end--) {
        if (arr[end] !== '') break;
      }
      if (start > end) return [];
      return arr.slice(start, end - start + 1);
    }
    var fromParts = trim(from.split('/'));
    var toParts = trim(to.split('/'));
    // Walk past the common prefix, then go '..' for each remaining `from`
    // segment and descend into the remaining `to` segments.
    var length = Math.min(fromParts.length, toParts.length);
    var samePartsLength = length;
    for (var i = 0; i < length; i++) {
      if (fromParts[i] !== toParts[i]) {
        samePartsLength = i;
        break;
      }
    }
    var outputParts = [];
    for (var i = samePartsLength; i < fromParts.length; i++) {
      outputParts.push('..');
    }
    outputParts = outputParts.concat(toParts.slice(samePartsLength));
    return outputParts.join('/');
  },
};
+
+
+
// Queue of pending stdin bytes (UTF-8 byte values produced by
// intArrayFromString) that FS_stdin_getChar hands out one at a time.
var FS_stdin_getChar_buffer = [];
+
// Number of bytes needed to encode `str` as UTF-8, excluding the null
// terminator. charCodeAt yields UTF-16 code units; a surrogate code unit
// (0xD800-0xDFFF) starts a pair that encodes to 4 UTF-8 bytes, so its
// trailing unit is skipped. See http://unicode.org/faq/utf_bom.html#utf16-3
var lengthBytesUTF8 = (str) => {
  var total = 0;
  var i = 0;
  while (i < str.length) {
    var unit = str.charCodeAt(i++);
    if (unit >= 0xD800 && unit <= 0xDFFF) {
      // Surrogate pair: one extra UTF-16 unit, four UTF-8 bytes.
      total += 4;
      i++;
    } else if (unit <= 0x7F) {
      total += 1;
    } else if (unit <= 0x7FF) {
      total += 2;
    } else {
      total += 3;
    }
  }
  return total;
};
+
+ var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
+ // Parameter maxBytesToWrite is not optional. Negative values, 0, null,
+ // undefined and false each don't write out any bytes.
+ if (!(maxBytesToWrite > 0))
+ return 0;
+
+ var startIdx = outIdx;
+ var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator.
+ for (var i = 0; i < str.length; ++i) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
+ // unit, not a Unicode code point of the character! So decode
+ // UTF16->UTF32->UTF8.
+ // See http://unicode.org/faq/utf_bom.html#utf16-3
+ // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description
+ // and https://www.ietf.org/rfc/rfc2279.txt
+ // and https://tools.ietf.org/html/rfc3629
+ var u = str.charCodeAt(i); // possibly a lead surrogate
+ if (u >= 0xD800 && u <= 0xDFFF) {
+ var u1 = str.charCodeAt(++i);
+ u = 0x10000 + ((u & 0x3FF) << 10) | (u1 & 0x3FF);
+ }
+ if (u <= 0x7F) {
+ if (outIdx >= endIdx) break;
+ heap[outIdx++] = u;
+ } else if (u <= 0x7FF) {
+ if (outIdx + 1 >= endIdx) break;
+ heap[outIdx++] = 0xC0 | (u >> 6);
+ heap[outIdx++] = 0x80 | (u & 63);
+ } else if (u <= 0xFFFF) {
+ if (outIdx + 2 >= endIdx) break;
+ heap[outIdx++] = 0xE0 | (u >> 12);
+ heap[outIdx++] = 0x80 | ((u >> 6) & 63);
+ heap[outIdx++] = 0x80 | (u & 63);
+ } else {
+ if (outIdx + 3 >= endIdx) break;
+ heap[outIdx++] = 0xF0 | (u >> 18);
+ heap[outIdx++] = 0x80 | ((u >> 12) & 63);
+ heap[outIdx++] = 0x80 | ((u >> 6) & 63);
+ heap[outIdx++] = 0x80 | (u & 63);
+ }
+ }
+ // Null-terminate the pointer to the buffer.
+ heap[outIdx] = 0;
+ return outIdx - startIdx;
+ };
/** @type {function(string, boolean=, number=)} */
function intArrayFromString(stringy, dontAddNull, length) {
  // Convert a JS string to an array of UTF-8 byte values. A positive
  // `length` caps the buffer size; otherwise the buffer is sized to fit the
  // whole string plus a NUL terminator. When `dontAddNull` is set the array
  // is trimmed to exactly the payload bytes written (terminator dropped).
  var size = length > 0 ? length : lengthBytesUTF8(stringy) + 1;
  var bytes = new Array(size);
  var written = stringToUTF8Array(stringy, bytes, 0, bytes.length);
  if (dontAddNull) bytes.length = written;
  return bytes;
}
// Return the next byte of stdin, or null when no input is available.
// Input is fetched in chunks (Node: readSync on the stdin fd; browser:
// window.prompt) and buffered as UTF-8 bytes in FS_stdin_getChar_buffer.
var FS_stdin_getChar = () => {
  if (!FS_stdin_getChar_buffer.length) {
    var result = null;
    if (ENVIRONMENT_IS_NODE) {
      // we will read data by chunks of BUFSIZE
      var BUFSIZE = 256;
      var buf = Buffer.alloc(BUFSIZE);
      var bytesRead = 0;

      // For some reason we must suppress a closure warning here, even though
      // fd definitely exists on process.stdin, and is even the proper way to
      // get the fd of stdin,
      // https://github.com/nodejs/help/issues/2136#issuecomment-523649904
      // This started to happen after moving this logic out of library_tty.js,
      // so it is related to the surrounding code in some unclear manner.
      /** @suppress {missingProperties} */
      var fd = process.stdin.fd;

      try {
        bytesRead = fs.readSync(fd, buf, 0, BUFSIZE);
      } catch(e) {
        // Cross-platform differences: on Windows, reading EOF throws an
        // exception, but on other OSes, reading EOF returns 0. Uniformize
        // behavior by treating the EOF exception to return 0.
        if (e.toString().includes('EOF')) bytesRead = 0;
        else throw e;
      }

      if (bytesRead > 0) {
        result = buf.slice(0, bytesRead).toString('utf-8');
      }
    } else
    if (typeof window != 'undefined' &&
      typeof window.prompt == 'function') {
      // Browser.
      result = window.prompt('Input: '); // returns null on cancel
      if (result !== null) {
        result += '\n';
      }
    } else
    {}
    if (!result) {
      return null;
    }
    // Re-encode the chunk as raw UTF-8 bytes (dontAddNull: no terminator).
    FS_stdin_getChar_buffer = intArrayFromString(result, true);
  }
  return FS_stdin_getChar_buffer.shift();
};
// Terminal (character device) support: registers stdin/stdout/stderr-style
// devices with the FS and implements their stream operations. Output is
// line-buffered per tty and flushed through out()/err() on newline or fsync.
var TTY = {
  ttys:[],
  init() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // currently, FS.init does not distinguish if process.stdin is a file or TTY
    //   // device, it always assumes it's a TTY device. because of this, we're forcing
    //   // process.stdin to UTF8 encoding to at least make stdin reading compatible
    //   // with text files until FS.init can be refactored.
    //   process.stdin.setEncoding('utf8');
    // }
  },
  shutdown() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
    //   // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
    //   // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
    //   // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
    //   // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
    //   process.stdin.pause();
    // }
  },
  register(dev, ops) {
    // Remember the tty's op table and expose it to the FS as a char device.
    TTY.ttys[dev] = { input: [], output: [], ops: ops };
    FS.registerDevice(dev, TTY.stream_ops);
  },
  stream_ops:{
    open(stream) {
      var tty = TTY.ttys[stream.node.rdev];
      if (!tty) {
        // no tty registered for this device number
        throw new FS.ErrnoError(43);
      }
      stream.tty = tty;
      stream.seekable = false;
    },
    close(stream) {
      // flush any pending line data
      stream.tty.ops.fsync(stream.tty);
    },
    fsync(stream) {
      stream.tty.ops.fsync(stream.tty);
    },
    read(stream, buffer, offset, length, pos /* ignored */) {
      if (!stream.tty || !stream.tty.ops.get_char) {
        throw new FS.ErrnoError(60);
      }
      var bytesRead = 0;
      for (var i = 0; i < length; i++) {
        var result;
        try {
          result = stream.tty.ops.get_char(stream.tty);
        } catch (e) {
          throw new FS.ErrnoError(29);
        }
        // undefined with nothing read yet: no data available right now.
        if (result === undefined && bytesRead === 0) {
          throw new FS.ErrnoError(6);
        }
        // null/undefined after some data: return what we have so far.
        if (result === null || result === undefined) break;
        bytesRead++;
        buffer[offset+i] = result;
      }
      if (bytesRead) {
        stream.node.atime = Date.now();
      }
      return bytesRead;
    },
    write(stream, buffer, offset, length, pos) {
      if (!stream.tty || !stream.tty.ops.put_char) {
        throw new FS.ErrnoError(60);
      }
      try {
        for (var i = 0; i < length; i++) {
          stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
        }
      } catch (e) {
        throw new FS.ErrnoError(29);
      }
      if (length) {
        stream.node.mtime = stream.node.ctime = Date.now();
      }
      // i === length here, since any put_char failure throws above.
      return i;
    },
  },
  default_tty_ops:{
    // stdout-like tty: buffers bytes until newline, then prints via out().
    get_char(tty) {
      return FS_stdin_getChar();
    },
    put_char(tty, val) {
      if (val === null || val === 10) {
        out(UTF8ArrayToString(tty.output));
        tty.output = [];
      } else {
        if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle.
      }
    },
    fsync(tty) {
      if (tty.output && tty.output.length > 0) {
        out(UTF8ArrayToString(tty.output));
        tty.output = [];
      }
    },
    ioctl_tcgets(tty) {
      // typical setting
      return {
        c_iflag: 25856,
        c_oflag: 5,
        c_cflag: 191,
        c_lflag: 35387,
        c_cc: [
          0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00,
          0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
          0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        ]
      };
    },
    ioctl_tcsets(tty, optional_actions, data) {
      // currently just ignore
      return 0;
    },
    ioctl_tiocgwinsz(tty) {
      // fixed 24x80 window size
      return [24, 80];
    },
  },
  default_tty1_ops:{
    // stderr-like tty: same line buffering, but flushes via err().
    put_char(tty, val) {
      if (val === null || val === 10) {
        err(UTF8ArrayToString(tty.output));
        tty.output = [];
      } else {
        if (val != 0) tty.output.push(val);
      }
    },
    fsync(tty) {
      if (tty.output && tty.output.length > 0) {
        err(UTF8ArrayToString(tty.output));
        tty.output = [];
      }
    },
  },
};
+
+
// Zero out `size` bytes of wasm heap memory starting at `address`.
var zeroMemory = (address, size) => {
  HEAPU8.fill(0, address, address + size);
};
+
// Round `size` up to the nearest multiple of `alignment`.
// Uses float division, so sizes are assumed to fit comfortably in a double.
var alignMemory = (size, alignment) => Math.ceil(size / alignment) * alignment;
// Allocate zeroed, 64KiB-aligned memory for mmap emulation. Returns the
// pointer, or 0 on allocation failure (callers map that to an error).
var mmapAlloc = (size) => {
  // 65536 is the wasm page size; both size and alignment are page-granular.
  size = alignMemory(size, 65536);
  var ptr = _emscripten_builtin_memalign(65536, size);
  if (ptr) zeroMemory(ptr, size);
  return ptr;
};
// In-memory filesystem backend: files are stored in growable Uint8Arrays,
// directories as plain name->node maps. This is the default mount type for
// the Emscripten FS.
var MEMFS = {
  ops_table:null,
  mount(mount) {
    // 16895 = S_IFDIR | 0777: the mount root is a world-accessible directory.
    return MEMFS.createNode(null, '/', 16895, 0);
  },
  createNode(parent, name, mode, dev) {
    if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
      // not supported
      throw new FS.ErrnoError(63);
    }
    // Build the shared per-node-kind op tables lazily, on first use.
    MEMFS.ops_table ||= {
      dir: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr,
          lookup: MEMFS.node_ops.lookup,
          mknod: MEMFS.node_ops.mknod,
          rename: MEMFS.node_ops.rename,
          unlink: MEMFS.node_ops.unlink,
          rmdir: MEMFS.node_ops.rmdir,
          readdir: MEMFS.node_ops.readdir,
          symlink: MEMFS.node_ops.symlink
        },
        stream: {
          llseek: MEMFS.stream_ops.llseek
        }
      },
      file: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr
        },
        stream: {
          llseek: MEMFS.stream_ops.llseek,
          read: MEMFS.stream_ops.read,
          write: MEMFS.stream_ops.write,
          allocate: MEMFS.stream_ops.allocate,
          mmap: MEMFS.stream_ops.mmap,
          msync: MEMFS.stream_ops.msync
        }
      },
      link: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr,
          readlink: MEMFS.node_ops.readlink
        },
        stream: {}
      },
      chrdev: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr
        },
        stream: FS.chrdev_stream_ops
      }
    };
    var node = FS.createNode(parent, name, mode, dev);
    if (FS.isDir(node.mode)) {
      node.node_ops = MEMFS.ops_table.dir.node;
      node.stream_ops = MEMFS.ops_table.dir.stream;
      node.contents = {};
    } else if (FS.isFile(node.mode)) {
      node.node_ops = MEMFS.ops_table.file.node;
      node.stream_ops = MEMFS.ops_table.file.stream;
      node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity.
      // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred
      // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size
      // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity-vs-used scheme.
      node.contents = null;
    } else if (FS.isLink(node.mode)) {
      node.node_ops = MEMFS.ops_table.link.node;
      node.stream_ops = MEMFS.ops_table.link.stream;
    } else if (FS.isChrdev(node.mode)) {
      node.node_ops = MEMFS.ops_table.chrdev.node;
      node.stream_ops = MEMFS.ops_table.chrdev.stream;
    }
    node.atime = node.mtime = node.ctime = Date.now();
    // add the new node to the parent
    if (parent) {
      parent.contents[name] = node;
      parent.atime = parent.mtime = parent.ctime = node.atime;
    }
    return node;
  },
  getFileDataAsTypedArray(node) {
    // View of exactly the file's used bytes, regardless of backing storage.
    if (!node.contents) return new Uint8Array(0);
    if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes.
    return new Uint8Array(node.contents);
  },
  expandFileStorage(node, newCapacity) {
    var prevCapacity = node.contents ? node.contents.length : 0;
    if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough.
    // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity.
    // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to
    // avoid overshooting the allocation cap by a very large margin.
    var CAPACITY_DOUBLING_MAX = 1024 * 1024;
    newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0);
    if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding.
    var oldContents = node.contents;
    node.contents = new Uint8Array(newCapacity); // Allocate new storage.
    if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage.
  },
  resizeFileStorage(node, newSize) {
    // Exact resize (used for truncate/setattr), preserving a prefix of data.
    if (node.usedBytes == newSize) return;
    if (newSize == 0) {
      node.contents = null; // Fully decommit when requesting a resize to zero.
      node.usedBytes = 0;
    } else {
      var oldContents = node.contents;
      node.contents = new Uint8Array(newSize); // Allocate new storage.
      if (oldContents) {
        node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage.
      }
      node.usedBytes = newSize;
    }
  },
  node_ops:{
    getattr(node) {
      // Synthesize a stat-like record from the in-memory node.
      var attr = {};
      // device numbers reuse inode numbers.
      attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
      attr.ino = node.id;
      attr.mode = node.mode;
      attr.nlink = 1;
      attr.uid = 0;
      attr.gid = 0;
      attr.rdev = node.rdev;
      if (FS.isDir(node.mode)) {
        attr.size = 4096;
      } else if (FS.isFile(node.mode)) {
        attr.size = node.usedBytes;
      } else if (FS.isLink(node.mode)) {
        attr.size = node.link.length;
      } else {
        attr.size = 0;
      }
      attr.atime = new Date(node.atime);
      attr.mtime = new Date(node.mtime);
      attr.ctime = new Date(node.ctime);
      // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
      // but this is not required by the standard.
      attr.blksize = 4096;
      attr.blocks = Math.ceil(attr.size / attr.blksize);
      return attr;
    },
    setattr(node, attr) {
      // Apply whichever of mode/times/size were provided; size triggers an
      // actual truncate/grow of the backing storage.
      for (const key of ["mode", "atime", "mtime", "ctime"]) {
        if (attr[key] != null) {
          node[key] = attr[key];
        }
      }
      if (attr.size !== undefined) {
        MEMFS.resizeFileStorage(node, attr.size);
      }
    },
    lookup(parent, name) {
      // MEMFS keeps all children in parent.contents, which FS.lookupNode
      // consults first — reaching here means the entry does not exist.
      throw MEMFS.doesNotExistError;
    },
    mknod(parent, name, mode, dev) {
      return MEMFS.createNode(parent, name, mode, dev);
    },
    rename(old_node, new_dir, new_name) {
      var new_node;
      try {
        new_node = FS.lookupNode(new_dir, new_name);
      } catch (e) {}
      if (new_node) {
        if (FS.isDir(old_node.mode)) {
          // if we're overwriting a directory at new_name, make sure it's empty.
          for (var i in new_node.contents) {
            throw new FS.ErrnoError(55);
          }
        }
        FS.hashRemoveNode(new_node);
      }
      // do the internal rewiring
      delete old_node.parent.contents[old_node.name];
      new_dir.contents[new_name] = old_node;
      old_node.name = new_name;
      new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now();
    },
    unlink(parent, name) {
      delete parent.contents[name];
      parent.ctime = parent.mtime = Date.now();
    },
    rmdir(parent, name) {
      var node = FS.lookupNode(parent, name);
      // a single iteration proves the directory is non-empty
      for (var i in node.contents) {
        throw new FS.ErrnoError(55);
      }
      delete parent.contents[name];
      parent.ctime = parent.mtime = Date.now();
    },
    readdir(node) {
      return ['.', '..', ...Object.keys(node.contents)];
    },
    symlink(parent, newname, oldpath) {
      // 40960 = S_IFLNK; the link target is stored verbatim on the node.
      var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0);
      node.link = oldpath;
      return node;
    },
    readlink(node) {
      if (!FS.isLink(node.mode)) {
        throw new FS.ErrnoError(28);
      }
      return node.link;
    },
  },
  stream_ops:{
    read(stream, buffer, offset, length, position) {
      var contents = stream.node.contents;
      if (position >= stream.node.usedBytes) return 0;
      var size = Math.min(stream.node.usedBytes - position, length);
      if (size > 8 && contents.subarray) { // non-trivial, and typed array
        buffer.set(contents.subarray(position, position + size), offset);
      } else {
        for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
      }
      return size;
    },
    write(stream, buffer, offset, length, position, canOwn) {
      // If the buffer is located in main memory (HEAP), and if
      // memory can grow, we can't hold on to references of the
      // memory buffer, as they may get invalidated. That means we
      // need to copy its contents.
      if (buffer.buffer === HEAP8.buffer) {
        canOwn = false;
      }

      if (!length) return 0;
      var node = stream.node;
      node.mtime = node.ctime = Date.now();

      if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array?
        if (canOwn) {
          // Caller relinquishes the buffer: adopt a view of it directly.
          node.contents = buffer.subarray(offset, offset + length);
          node.usedBytes = length;
          return length;
        } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data.
          node.contents = buffer.slice(offset, offset + length);
          node.usedBytes = length;
          return length;
        } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file?
          node.contents.set(buffer.subarray(offset, offset + length), position);
          return length;
        }
      }

      // Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
      MEMFS.expandFileStorage(node, position+length);
      if (node.contents.subarray && buffer.subarray) {
        // Use typed array write which is available.
        node.contents.set(buffer.subarray(offset, offset + length), position);
      } else {
        for (var i = 0; i < length; i++) {
          node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not.
        }
      }
      node.usedBytes = Math.max(node.usedBytes, position + length);
      return length;
    },
    llseek(stream, offset, whence) {
      // whence: 1 adds the current position (SEEK_CUR-style), 2 adds the
      // file size (SEEK_END-style); anything else is absolute.
      var position = offset;
      if (whence === 1) {
        position += stream.position;
      } else if (whence === 2) {
        if (FS.isFile(stream.node.mode)) {
          position += stream.node.usedBytes;
        }
      }
      if (position < 0) {
        throw new FS.ErrnoError(28);
      }
      return position;
    },
    allocate(stream, offset, length) {
      MEMFS.expandFileStorage(stream.node, offset + length);
      stream.node.usedBytes = Math.max(stream.node.usedBytes, offset + length);
    },
    mmap(stream, length, position, prot, flags) {
      if (!FS.isFile(stream.node.mode)) {
        throw new FS.ErrnoError(43);
      }
      var ptr;
      var allocated;
      var contents = stream.node.contents;
      // Only make a new copy when MAP_PRIVATE is specified. The in-place
      // path below requires the contents to already live in the wasm heap.
      if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) {
        // We can't emulate MAP_SHARED when the file is not backed by the
        // buffer we're mapping to (e.g. the HEAP buffer).
        allocated = false;
        ptr = contents.byteOffset;
      } else {
        allocated = true;
        ptr = mmapAlloc(length);
        if (!ptr) {
          throw new FS.ErrnoError(48);
        }
        if (contents) {
          // Try to avoid unnecessary slices.
          if (position > 0 || position + length < contents.length) {
            if (contents.subarray) {
              contents = contents.subarray(position, position + length);
            } else {
              contents = Array.prototype.slice.call(contents, position, position + length);
            }
          }
          HEAP8.set(contents, ptr);
        }
      }
      return { ptr, allocated };
    },
    msync(stream, buffer, offset, length, mmapFlags) {
      // Write the mapped bytes back into the file.
      MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
      // should we check if bytesWritten and length are the same?
      return 0;
    },
  },
};
+
// Load `url` via the environment-appropriate readAsync and return its
// contents as a Uint8Array.
var asyncLoad = async (url) => {
  var arrayBuffer = await readAsync(url);
  return new Uint8Array(arrayBuffer);
};
+
+
// Thin forwarding wrapper around FS.createDataFile, kept as a separate
// symbol for the preload-plugin code paths below.
var FS_createDataFile = (parent, name, fileData, canRead, canWrite, canOwn) => {
  FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn);
};
+
// Plugins (e.g. image/audio decoders) that may asynchronously pre-process
// files before they land in the FS; users can inject their own through
// Module['preloadPlugins'].
var preloadPlugins = Module['preloadPlugins'] || [];
// Offer `byteArray` to the first plugin whose canHandle() accepts the file
// name. Returns true when a plugin took over (it is then responsible for
// eventually calling finish or onerror itself).
var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => {
  // Ensure plugins are ready.
  if (typeof Browser != 'undefined') Browser.init();

  var handled = false;
  preloadPlugins.forEach((plugin) => {
    if (handled) return;
    if (plugin['canHandle'](fullname)) {
      plugin['handle'](byteArray, fullname, finish, onerror);
      handled = true;
    }
  });
  return handled;
};
// Asynchronously fetch `url` (or use it directly when it is already byte
// data), offer the bytes to the preload plugins, then create the file in
// the FS. A run dependency keeps the runtime from starting main() until
// this whole pipeline completes.
var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => {
  // TODO we should allow people to just pass in a complete filename instead
  // of parent and name being that we just join them anyways
  var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
  var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname
  function processData(byteArray) {
    function finish(byteArray) {
      preFinish?.();
      if (!dontCreateFile) {
        FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
      }
      onload?.();
      removeRunDependency(dep);
    }
    // A plugin that handles the data will call finish/onerror on its own.
    if (FS_handledByPreloadPlugin(byteArray, fullname, finish, () => {
      onerror?.();
      removeRunDependency(dep);
    })) {
      return;
    }
    finish(byteArray);
  }
  addRunDependency(dep);
  if (typeof url == 'string') {
    asyncLoad(url).then(processData, onerror);
  } else {
    // url already holds the raw data (e.g. a typed array).
    processData(url);
  }
};
+
// Translate an fopen()-style mode string into open() flag bits
// (1=write-only, 2=read-write, 64=create, 512=truncate, 1024=append —
// matching the Linux O_* values used throughout this FS layer).
var FS_modeStringToFlags = (str) => {
  switch (str) {
    case 'r':  return 0;
    case 'r+': return 2;
    case 'w':  return 512 | 64 | 1;
    case 'w+': return 512 | 64 | 2;
    case 'a':  return 1024 | 64 | 1;
    case 'a+': return 1024 | 64 | 2;
    default:
      throw new Error(`Unknown file open mode: ${str}`);
  }
};
+
// Build a permission mode from read/write capability flags: readable files
// get the r+x bits for user/group/other (292|73 == 0o555), writable files
// add the w bits (146 == 0o222).
var FS_getMode = (canRead, canWrite) =>
  (canRead ? 292 | 73 : 0) | (canWrite ? 146 : 0);
+
+
+
+ var FS = {
+ root:null,
+ mounts:[],
+ devices:{
+ },
+ streams:[],
+ nextInode:1,
+ nameTable:null,
+ currentPath:"/",
+ initialized:false,
+ ignorePermissions:true,
+ ErrnoError:class {
+ name = 'ErrnoError';
+ // We set the `name` property to be able to identify `FS.ErrnoError`
+ // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway.
+ // - when using PROXYFS, an error can come from an underlying FS
+ // as different FS objects have their own FS.ErrnoError each,
+ // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs.
+ // we'll use the reliable test `err.name == "ErrnoError"` instead
+ constructor(errno) {
+ this.errno = errno;
+ }
+ },
+ filesystems:null,
+ syncFSRequests:0,
+ readFiles:{
+ },
+ FSStream:class {
+ shared = {};
+ get object() {
+ return this.node;
+ }
+ set object(val) {
+ this.node = val;
+ }
+ get isRead() {
+ return (this.flags & 2097155) !== 1;
+ }
+ get isWrite() {
+ return (this.flags & 2097155) !== 0;
+ }
+ get isAppend() {
+ return (this.flags & 1024);
+ }
+ get flags() {
+ return this.shared.flags;
+ }
+ set flags(val) {
+ this.shared.flags = val;
+ }
+ get position() {
+ return this.shared.position;
+ }
+ set position(val) {
+ this.shared.position = val;
+ }
+ },
+ FSNode:class {
+ node_ops = {};
+ stream_ops = {};
+ readMode = 292 | 73;
+ writeMode = 146;
+ mounted = null;
+ constructor(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.rdev = rdev;
+ this.atime = this.mtime = this.ctime = Date.now();
+ }
+ get read() {
+ return (this.mode & this.readMode) === this.readMode;
+ }
+ set read(val) {
+ val ? this.mode |= this.readMode : this.mode &= ~this.readMode;
+ }
+ get write() {
+ return (this.mode & this.writeMode) === this.writeMode;
+ }
+ set write(val) {
+ val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode;
+ }
+ get isFolder() {
+ return FS.isDir(this.mode);
+ }
+ get isDevice() {
+ return FS.isChrdev(this.mode);
+ }
+ },
+ lookupPath(path, opts = {}) {
+ if (!path) {
+ throw new FS.ErrnoError(44);
+ }
+ opts.follow_mount ??= true
+
+ if (!PATH.isAbs(path)) {
+ path = FS.cwd() + '/' + path;
+ }
+
+ // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) {
+ // split the absolute path
+ var parts = path.split('/').filter((p) => !!p);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ if (parts[i] === '.') {
+ continue;
+ }
+
+ if (parts[i] === '..') {
+ current_path = PATH.dirname(current_path);
+ current = current.parent;
+ continue;
+ }
+
+ current_path = PATH.join2(current_path, parts[i]);
+ try {
+ current = FS.lookupNode(current, parts[i]);
+ } catch (e) {
+ // if noent_okay is true, suppress a ENOENT in the last component
+ // and return an object with an undefined node. This is needed for
+ // resolving symlinks in the path when creating a file.
+ if ((e?.errno === 44) && islast && opts.noent_okay) {
+ return { path: current_path };
+ }
+ throw e;
+ }
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (FS.isLink(current.mode) && (!islast || opts.follow)) {
+ if (!current.node_ops.readlink) {
+ throw new FS.ErrnoError(52);
+ }
+ var link = current.node_ops.readlink(current);
+ if (!PATH.isAbs(link)) {
+ link = PATH.dirname(current_path) + '/' + link;
+ }
+ path = link + '/' + parts.slice(i + 1).join('/');
+ continue linkloop;
+ }
+ }
+ return { path: current_path, node: current };
+ }
+ throw new FS.ErrnoError(32);
+ },
+ getPath(node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path;
+ }
+ path = path ? `${node.name}/${path}` : node.name;
+ node = node.parent;
+ }
+ },
+ hashName(parentid, name) {
+ var hash = 0;
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },
+ hashAddNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },
+ hashRemoveNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },
+ lookupNode(parent, name) {
+ var errCode = FS.mayLookup(parent);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },
+ createNode(parent, name, mode, rdev) {
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },
+ destroyNode(node) {
+ FS.hashRemoveNode(node);
+ },
+ isRoot(node) {
+ return node === node.parent;
+ },
+ isMountpoint(node) {
+ return !!node.mounted;
+ },
+ isFile(mode) {
+ return (mode & 61440) === 32768;
+ },
+ isDir(mode) {
+ return (mode & 61440) === 16384;
+ },
+ isLink(mode) {
+ return (mode & 61440) === 40960;
+ },
+ isChrdev(mode) {
+ return (mode & 61440) === 8192;
+ },
+ isBlkdev(mode) {
+ return (mode & 61440) === 24576;
+ },
+ isFIFO(mode) {
+ return (mode & 61440) === 4096;
+ },
+ isSocket(mode) {
+ return (mode & 49152) === 49152;
+ },
+ flagsToPermissionString(flag) {
+ var perms = ['r', 'w', 'rw'][flag & 3];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },
+ nodePermissions(node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.includes('r') && !(node.mode & 292)) {
+ return 2;
+ } else if (perms.includes('w') && !(node.mode & 146)) {
+ return 2;
+ } else if (perms.includes('x') && !(node.mode & 73)) {
+ return 2;
+ }
+ return 0;
+ },
+ mayLookup(dir) {
+ if (!FS.isDir(dir.mode)) return 54;
+ var errCode = FS.nodePermissions(dir, 'x');
+ if (errCode) return errCode;
+ if (!dir.node_ops.lookup) return 2;
+ return 0;
+ },
+ mayCreate(dir, name) {
+ if (!FS.isDir(dir.mode)) {
+ return 54;
+ }
+ try {
+ var node = FS.lookupNode(dir, name);
+ return 20;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },
+ mayDelete(dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var errCode = FS.nodePermissions(dir, 'wx');
+ if (errCode) {
+ return errCode;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return 54;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return 10;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return 31;
+ }
+ }
+ return 0;
+ },
+ mayOpen(node, flags) {
+ if (!node) {
+ return 44;
+ }
+ if (FS.isLink(node.mode)) {
+ return 32;
+ } else if (FS.isDir(node.mode)) {
+ if (FS.flagsToPermissionString(flags) !== 'r' // opening for write
+ || (flags & (512 | 64))) { // TODO: check for O_SEARCH? (== search for dir only)
+ return 31;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },
+ checkOpExists(op, err) {
+ if (!op) {
+ throw new FS.ErrnoError(err);
+ }
+ return op;
+ },
+ MAX_OPEN_FDS:4096,
+ nextfd() {
+ for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(33);
+ },
+ getStreamChecked(fd) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(8);
+ }
+ return stream;
+ },
+ getStream:(fd) => FS.streams[fd],
+ createStream(stream, fd = -1) {
+
+ // clone it, so we can return an instance of FSStream
+ stream = Object.assign(new FS.FSStream(), stream);
+ if (fd == -1) {
+ fd = FS.nextfd();
+ }
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },
+ closeStream(fd) {
+ FS.streams[fd] = null;
+ },
+ dupStream(origStream, fd = -1) {
+ var stream = FS.createStream(origStream, fd);
+ stream.stream_ops?.dup?.(stream);
+ return stream;
+ },
+ chrdev_stream_ops:{
+ open(stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ stream.stream_ops.open?.(stream);
+ },
+ llseek() {
+ throw new FS.ErrnoError(70);
+ },
+ },
+ major:(dev) => ((dev) >> 8),
+ minor:(dev) => ((dev) & 0xff),
+ makedev:(ma, mi) => ((ma) << 8 | (mi)),
+ registerDevice(dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },
+ getDevice:(dev) => FS.devices[dev],
+ getMounts(mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push(...m.mounts);
+ }
+
+ return mounts;
+ },
+ syncfs(populate, callback) {
+ if (typeof populate == 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ FS.syncFSRequests++;
+
+ if (FS.syncFSRequests > 1) {
+ err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`);
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function doCallback(errCode) {
+ FS.syncFSRequests--;
+ return callback(errCode);
+ }
+
+ function done(errCode) {
+ if (errCode) {
+ if (!done.errored) {
+ done.errored = true;
+ return doCallback(errCode);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ doCallback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach((mount) => {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },
+ mount(type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(10);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ }
+
+ var mount = {
+ type,
+ opts,
+ mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },
+ unmount(mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(28);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach((hash) => {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.includes(current.mount)) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ node.mount.mounts.splice(idx, 1);
+ },
+ lookup(parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },
+ mknod(path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ if (!name) {
+ throw new FS.ErrnoError(28);
+ }
+ if (name === '.' || name === '..') {
+ throw new FS.ErrnoError(20);
+ }
+ var errCode = FS.mayCreate(parent, name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },
+ statfs(path) {
+ return FS.statfsNode(FS.lookupPath(path, {follow: true}).node);
+ },
+ statfsStream(stream) {
+ // We keep a separate statfsStream function because noderawfs overrides
+ // it. In noderawfs, stream.node is sometimes null. Instead, we need to
+ // look at stream.path.
+ return FS.statfsNode(stream.node);
+ },
+ statfsNode(node) {
+ // NOTE: None of the defaults here are true. We're just returning safe and
+ // sane values. Currently nodefs and rawfs replace these defaults,
+ // other file systems leave them alone.
+ var rtn = {
+ bsize: 4096,
+ frsize: 4096,
+ blocks: 1e6,
+ bfree: 5e5,
+ bavail: 5e5,
+ files: FS.nextInode,
+ ffree: FS.nextInode - 1,
+ fsid: 42,
+ flags: 2,
+ namelen: 255,
+ };
+
+ if (node.node_ops.statfs) {
+ Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root));
+ }
+ return rtn;
+ },
+ create(path, mode = 0o666) {
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },
+ mkdir(path, mode = 0o777) {
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },
+ mkdirTree(path, mode) {
+ var dirs = path.split('/');
+ var d = '';
+ for (var i = 0; i < dirs.length; ++i) {
+ if (!dirs[i]) continue;
+ d += '/' + dirs[i];
+ try {
+ FS.mkdir(d, mode);
+ } catch(e) {
+ if (e.errno != 20) throw e;
+ }
+ }
+ },
+ mkdev(path, mode, dev) {
+ if (typeof dev == 'undefined') {
+ dev = mode;
+ mode = 0o666;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },
+ symlink(oldpath, newpath) {
+ if (!PATH_FS.resolve(oldpath)) {
+ throw new FS.ErrnoError(44);
+ }
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var newname = PATH.basename(newpath);
+ var errCode = FS.mayCreate(parent, newname);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },
+ rename(old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+
+ // let the errors from non existent directories percolate up
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+
+ if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(75);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH_FS.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(28);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH_FS.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(55);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var errCode = FS.mayDelete(old_dir, old_name, isdir);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ errCode = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(10);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ errCode = FS.nodePermissions(old_dir, 'w');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ // update old node (we do this here to avoid each backend
+ // needing to)
+ old_node.parent = new_dir;
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },
+ rmdir(path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, true);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },
+ readdir(path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ var readdir = FS.checkOpExists(node.node_ops.readdir, 54);
+ return readdir(node);
+ },
+ unlink(path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, false);
+ if (errCode) {
+ // According to POSIX, we should map EISDIR to EPERM, but
+ // we instead do what Linux does (and we must, as we use
+ // the musl linux libc).
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },
+ readlink(path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(28);
+ }
+ return link.node_ops.readlink(link);
+ },
+ stat(path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ var getattr = FS.checkOpExists(node.node_ops.getattr, 63);
+ return getattr(node);
+ },
+ lstat(path) {
+ return FS.stat(path, true);
+ },
+ chmod(path, mode, dontFollow) {
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ ctime: Date.now(),
+ dontFollow
+ });
+ },
+ lchmod(path, mode) {
+ FS.chmod(path, mode, true);
+ },
+ fchmod(fd, mode) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chmod(stream.node, mode);
+ },
+ chown(path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ timestamp: Date.now(),
+ dontFollow
+ // we ignore the uid / gid for now
+ });
+ },
+ lchown(path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },
+ fchown(fd, uid, gid) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chown(stream.node, uid, gid);
+ },
+ truncate(path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(28);
+ }
+ var errCode = FS.nodePermissions(node, 'w');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },
+ ftruncate(fd, len) {
+ var stream = FS.getStreamChecked(fd);
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(28);
+ }
+ FS.truncate(stream.node, len);
+ },
+ utime(path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ atime: atime,
+ mtime: mtime
+ });
+ },
+ open(path, flags, mode = 0o666) {
+ if (path === "") {
+ throw new FS.ErrnoError(44);
+ }
+ flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ var isDirPath;
+ if (typeof path == 'object') {
+ node = path;
+ } else {
+ isDirPath = path.endsWith("/");
+ // noent_okay makes it so that if the final component of the path
+ // doesn't exist, lookupPath returns `node: undefined`. `path` will be
+ // updated to point to the target of all symlinks.
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072),
+ noent_okay: true
+ });
+ node = lookup.node;
+ path = lookup.path;
+ }
+ // perhaps we need to create the node
+ var created = false;
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(20);
+ }
+ } else if (isDirPath) {
+ throw new FS.ErrnoError(31);
+ } else {
+ // node doesn't exist, try to create it
+ // Ignore the permission bits here to ensure we can `open` this new
+ // file below. We use chmod below the apply the permissions once the
+ // file is open.
+ node = FS.mknod(path, mode | 0o777, 0);
+ created = true;
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(44);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // if asked only for a directory, then this must be one
+ if ((flags & 65536) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ // check permissions, if this is not a file we just created now (it is ok to
+ // create and write to a file with read-only permissions; it is read-only
+ // for later use)
+ if (!created) {
+ var errCode = FS.mayOpen(node, flags);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ // do truncation if necessary
+ if ((flags & 512) && !created) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512 | 131072);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ });
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (created) {
+ FS.chmod(node, mode & 0o777);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ }
+ }
+ return stream;
+ },
+ close(stream) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (stream.getdents) stream.getdents = null; // free readdir state
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ stream.fd = null;
+ },
+ isClosed(stream) {
+ return stream.fd === null;
+ },
+ llseek(stream, offset, whence) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(70);
+ }
+ if (whence != 0 && whence != 1 && whence != 2) {
+ throw new FS.ErrnoError(28);
+ }
+ stream.position = stream.stream_ops.llseek(stream, offset, whence);
+ stream.ungotten = [];
+ return stream.position;
+ },
+ read(stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(28);
+ }
+ var seeking = typeof position != 'undefined';
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(28);
+ }
+ if (stream.seekable && stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var seeking = typeof position != 'undefined';
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },
+ allocate(stream, offset, length) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(138);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },
+ mmap(stream, length, position, prot, flags) {
+ // User requests writing to file (prot & PROT_WRITE != 0).
+ // Checking if we have permissions to write to the file unless
+ // MAP_PRIVATE flag is set. According to POSIX spec it is possible
+ // to write to file opened in read-only mode with MAP_PRIVATE flag,
+ // as all modifications will be visible only in the memory of
+ // the current process.
+ if ((prot & 2) !== 0
+ && (flags & 2) === 0
+ && (stream.flags & 2097155) !== 2) {
+ throw new FS.ErrnoError(2);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(2);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(43);
+ }
+ if (!length) {
+ throw new FS.ErrnoError(28);
+ }
+ return stream.stream_ops.mmap(stream, length, position, prot, flags);
+ },
+ msync(stream, buffer, offset, length, mmapFlags) {
+ if (!stream.stream_ops.msync) {
+ return 0;
+ }
+ return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags);
+ },
+ ioctl(stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(59);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },
+ readFile(path, opts = {}) {
+ opts.flags = opts.flags || 0;
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error(`Invalid encoding type "${opts.encoding}"`);
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = UTF8ArrayToString(buf);
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },
+ writeFile(path, data, opts = {}) {
+ opts.flags = opts.flags || 577;
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (typeof data == 'string') {
+ var buf = new Uint8Array(lengthBytesUTF8(data)+1);
+ var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length);
+ FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn);
+ } else if (ArrayBuffer.isView(data)) {
+ FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
+ } else {
+ throw new Error('Unsupported data type');
+ }
+ FS.close(stream);
+ },
+ cwd:() => FS.currentPath,
+ chdir(path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (lookup.node === null) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ var errCode = FS.nodePermissions(lookup.node, 'x');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ FS.currentPath = lookup.path;
+ },
+ createDefaultDirectories() {
+ FS.mkdir('/tmp');
+ FS.mkdir('/home');
+ FS.mkdir('/home/web_user');
+ },
+ createDefaultDevices() {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: () => 0,
+ write: (stream, buffer, offset, length, pos) => length,
+ llseek: () => 0,
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using err() rather than out()
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // setup /dev/[u]random
+ // use a buffer to avoid overhead of individual crypto calls per byte
+ var randomBuffer = new Uint8Array(1024), randomLeft = 0;
+ var randomByte = () => {
+ if (randomLeft === 0) {
+ randomFill(randomBuffer);
+ randomLeft = randomBuffer.byteLength;
+ }
+ return randomBuffer[--randomLeft];
+ };
+ FS.createDevice('/dev', 'random', randomByte);
+ FS.createDevice('/dev', 'urandom', randomByte);
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },
+ createSpecialDirectories() {
+ // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the
+ // name of the stream for fd 6 (see test_unistd_ttyname)
+ FS.mkdir('/proc');
+ var proc_self = FS.mkdir('/proc/self');
+ FS.mkdir('/proc/self/fd');
+ FS.mount({
+ mount() {
+ var node = FS.createNode(proc_self, 'fd', 16895, 73);
+ node.stream_ops = {
+ llseek: MEMFS.stream_ops.llseek,
+ };
+ node.node_ops = {
+ lookup(parent, name) {
+ var fd = +name;
+ var stream = FS.getStreamChecked(fd);
+ var ret = {
+ parent: null,
+ mount: { mountpoint: 'fake' },
+ node_ops: { readlink: () => stream.path },
+ id: fd + 1,
+ };
+ ret.parent = ret; // make it look like a simple root node
+ return ret;
+ },
+ readdir() {
+ return Array.from(FS.streams.entries())
+ .filter(([k, v]) => v)
+ .map(([k, v]) => k.toString());
+ }
+ };
+ return node;
+ }
+ }, {}, '/proc/self/fd');
+ },
+ createStandardStreams(input, output, error) {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (input) {
+ FS.createDevice('/dev', 'stdin', input);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (output) {
+ FS.createDevice('/dev', 'stdout', null, output);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (error) {
+ FS.createDevice('/dev', 'stderr', null, error);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 0);
+ var stdout = FS.open('/dev/stdout', 1);
+ var stderr = FS.open('/dev/stderr', 1);
+ },
+ staticInit() {
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ FS.createSpecialDirectories();
+
+ FS.filesystems = {
+ 'MEMFS': MEMFS,
+ };
+ },
+ init(input, output, error) {
+ FS.initialized = true;
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ input ??= Module['stdin'];
+ output ??= Module['stdout'];
+ error ??= Module['stderr'];
+
+ FS.createStandardStreams(input, output, error);
+ },
+ quit() {
+ FS.initialized = false;
+ // force-flush all streams, so we get musl std streams printed out
+ // close all of our streams
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },
+ findObject(path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (!ret.exists) {
+ return null;
+ }
+ return ret.object;
+ },
+ analyzePath(path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },
+ createPath(parent, path, canRead, canWrite) {
+ parent = typeof parent == 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },
+ createFile(parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS_getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },
+ createDataFile(parent, name, data, canRead, canWrite, canOwn) {
+ var path = name;
+ if (parent) {
+ parent = typeof parent == 'string' ? parent : FS.getPath(parent);
+ path = name ? PATH.join2(parent, name) : parent;
+ }
+ var mode = FS_getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data == 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 577);
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ },
+ createDevice(parent, name, input, output) {
+ var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS_getMode(!!input, !!output);
+ FS.createDevice.major ??= 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open(stream) {
+ stream.seekable = false;
+ },
+ close(stream) {
+ // flush any pending line data
+ if (output?.buffer?.length) {
+ output(10);
+ }
+ },
+ read(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(6);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.atime = Date.now();
+ }
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ }
+ if (length) {
+ stream.node.mtime = stream.node.ctime = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },
+ forceLoadFile(obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ if (typeof XMLHttpRequest != 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else { // Command-line.
+ try {
+ obj.contents = readBinary(obj.url);
+ obj.usedBytes = obj.contents.length;
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ }
+ },
+ createLazyFile(parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array).
+ // Actual getting is abstracted away for eventual reuse.
+ class LazyUint8Array {
+ lengthKnown = false;
+ chunks = []; // Loaded chunks. Index is the chunk number
+ get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = (idx / this.chunkSize)|0;
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ setDataGetter(getter) {
+ this.getter = getter;
+ }
+ cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip";
+
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (from, to) => {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(/** @type{Array} */(xhr.response || []));
+ }
+ return intArrayFromString(xhr.responseText || '', true);
+ };
+ var lazyArray = this;
+ lazyArray.setDataGetter((chunkNum) => {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof lazyArray.chunks[chunkNum] == 'undefined') {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!');
+ return lazyArray.chunks[chunkNum];
+ });
+
+ if (usesGzip || !datalength) {
+ // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length
+ chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file
+ datalength = this.getter(0).length;
+ chunkSize = datalength;
+ out("LazyFiles on gzip forces download of the whole file when length is accessed");
+ }
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ get length() {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ get chunkSize() {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ }
+
+ if (typeof XMLHttpRequest != 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // Add a function that defers querying the file size until it is asked the first time.
+ Object.defineProperties(node, {
+ usedBytes: {
+ get: function() { return this.contents.length; }
+ }
+ });
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach((key) => {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = (...args) => {
+ FS.forceLoadFile(node);
+ return fn(...args);
+ };
+ });
+ function writeChunks(stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ }
+ // use a custom read function
+ stream_ops.read = (stream, buffer, offset, length, position) => {
+ FS.forceLoadFile(node);
+ return writeChunks(stream, buffer, offset, length, position)
+ };
+ // use a custom mmap function
+ stream_ops.mmap = (stream, length, position, prot, flags) => {
+ FS.forceLoadFile(node);
+ var ptr = mmapAlloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(48);
+ }
+ writeChunks(stream, HEAP8, ptr, length, position);
+ return { ptr, allocated: true };
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },
+ };
+
// Shared helpers for the __syscall_* shims below. The byte offsets written
// through the HEAP* views must match the struct layouts (struct stat,
// struct statfs, ...) of the C library compiled into the wasm module.
var SYSCALLS = {
  DEFAULT_POLLMASK:5,
  // Resolve `path` against directory fd `dirfd` (*at() syscall convention).
  // dirfd === -100 is AT_FDCWD, i.e. resolve against the current working
  // directory. With `allowEmpty`, an empty path names the directory itself.
  calculateAt(dirfd, path, allowEmpty) {
    if (PATH.isAbs(path)) {
      return path;
    }
    // relative path
    var dir;
    if (dirfd === -100) {
      dir = FS.cwd();
    } else {
      var dirstream = SYSCALLS.getStreamFromFD(dirfd);
      dir = dirstream.path;
    }
    if (path.length == 0) {
      if (!allowEmpty) {
        throw new FS.ErrnoError(44);; // 44: "no such entity" in this errno numbering
      }
      return dir;
    }
    return dir + '/' + path;
  },
  // Serialize an FS stat object into a C `struct stat` at `buf`.
  writeStat(buf, stat) {
    HEAP32[((buf)>>2)] = stat.dev;
    HEAP32[(((buf)+(4))>>2)] = stat.mode;
    HEAPU32[(((buf)+(8))>>2)] = stat.nlink;
    HEAP32[(((buf)+(12))>>2)] = stat.uid;
    HEAP32[(((buf)+(16))>>2)] = stat.gid;
    HEAP32[(((buf)+(20))>>2)] = stat.rdev;
    HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size);
    HEAP32[(((buf)+(32))>>2)] = 4096; // block size reported as a fixed 4KB
    HEAP32[(((buf)+(36))>>2)] = stat.blocks;
    // Timestamps are stored as whole seconds (i64) + nanoseconds (u32).
    var atime = stat.atime.getTime();
    var mtime = stat.mtime.getTime();
    var ctime = stat.ctime.getTime();
    HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000));
    HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000));
    HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000));
    HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000;
    HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino);
    return 0;
  },
  // Serialize statfs results into a C `struct statfs` at `buf`.
  writeStatFs(buf, stats) {
    HEAP32[(((buf)+(4))>>2)] = stats.bsize;
    HEAP32[(((buf)+(40))>>2)] = stats.bsize; // fragment size mirrors bsize -- TODO confirm field
    HEAP32[(((buf)+(8))>>2)] = stats.blocks;
    HEAP32[(((buf)+(12))>>2)] = stats.bfree;
    HEAP32[(((buf)+(16))>>2)] = stats.bavail;
    HEAP32[(((buf)+(20))>>2)] = stats.files;
    HEAP32[(((buf)+(24))>>2)] = stats.ffree;
    HEAP32[(((buf)+(28))>>2)] = stats.fsid;
    HEAP32[(((buf)+(44))>>2)] = stats.flags; // ST_NOSUID
    HEAP32[(((buf)+(36))>>2)] = stats.namelen;
  },
  // Sync an mmapped region back to the underlying file. Only file-backed,
  // shared mappings are written back.
  doMsync(addr, stream, len, flags, offset) {
    if (!FS.isFile(stream.node.mode)) {
      throw new FS.ErrnoError(43);
    }
    if (flags & 2) {
      // MAP_PRIVATE calls need not to be synced back to underlying fs
      return 0;
    }
    var buffer = HEAPU8.slice(addr, addr + len);
    FS.msync(stream, buffer, offset, len, flags);
  },
  // Look up the open stream for `fd` via FS.getStreamChecked (which
  // presumably throws ErrnoError for an invalid fd -- defined elsewhere).
  getStreamFromFD(fd) {
    var stream = FS.getStreamChecked(fd);
    return stream;
  },
  // Cursor used by syscallGetVarargI/P to walk variadic syscall arguments.
  varargs:undefined,
  // Read a NUL-terminated UTF-8 C string at heap address `ptr`.
  getStr(ptr) {
    var ret = UTF8ToString(ptr);
    return ret;
  },
};
// fcntl(2) shim. `cmd` values are the C library's F_* constants; extra
// arguments are read from the varargs area. Returns the result, or a
// negative errno (-28 appears to be EINVAL in this errno numbering).
function ___syscall_fcntl64(fd, cmd, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    switch (cmd) {
      case 0: { // duplicate onto the lowest free fd >= arg (F_DUPFD-style)
        var arg = syscallGetVarargI();
        if (arg < 0) {
          return -28;
        }
        while (FS.streams[arg]) {
          arg++;
        }
        var newStream;
        newStream = FS.dupStream(stream, arg);
        return newStream.fd;
      }
      case 1:
      case 2:
        return 0; // FD_CLOEXEC makes no sense for a single process.
      case 3: // get file status flags
        return stream.flags;
      case 4: { // set file status flags: OR the new flags in
        var arg = syscallGetVarargI();
        stream.flags |= arg;
        return 0;
      }
      case 12: { // get record lock info
        var arg = syscallGetVarargP();
        var offset = 0;
        // We're always unlocked.
        HEAP16[(((arg)+(offset))>>1)] = 2; // l_type = F_UNLCK
        return 0;
      }
      case 13:
      case 14:
        return 0; // Pretend that the locking is successful.
    }
    return -28; // unrecognized command
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// fstat(2): write stat information for open file `fd` into `buf`.
// Returns 0, or a negative errno on failure.
function ___syscall_fstat64(fd, buf) {
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    var info = FS.stat(stream.path);
    return SYSCALLS.writeStat(buf, info);
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// Encode `str` into the wasm heap at `outPtr` as UTF-8, writing at most
// `maxBytesToWrite` bytes; returns whatever stringToUTF8Array returns
// (its byte count -- see its definition elsewhere in this file).
var stringToUTF8 = (str, outPtr, maxBytesToWrite) => {
  return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite);
};
+
// getdents64: fill `dirp` with up to `count` bytes of directory records.
// Records are a fixed 280 bytes: 8-byte inode, 8-byte offset-of-next,
// 2-byte reclen, 1-byte type, then the NUL-terminated name (max 256 bytes).
// The listing is cached on the stream; progress is tracked via the
// stream's seek position (offset / 280 = next entry index).
function ___syscall_getdents64(fd, dirp, count) {
  try {

    var stream = SYSCALLS.getStreamFromFD(fd)
    stream.getdents ||= FS.readdir(stream.path);

    var struct_size = 280;
    var pos = 0;
    var off = FS.llseek(stream, 0, 1); // whence 1: current position

    var startIdx = Math.floor(off / struct_size);
    var endIdx = Math.min(stream.getdents.length, startIdx + Math.floor(count/struct_size))
    for (var idx = startIdx; idx < endIdx; idx++) {
      var id;
      var type;
      var name = stream.getdents[idx];
      if (name === '.') {
        id = stream.node.id;
        type = 4; // DT_DIR
      }
      else if (name === '..') {
        var lookup = FS.lookupPath(stream.path, { parent: true });
        id = lookup.node.id;
        type = 4; // DT_DIR
      }
      else {
        var child;
        try {
          child = FS.lookupNode(stream.node, name);
        } catch (e) {
          // If the entry is not a directory, file, or symlink, nodefs
          // lookupNode will raise EINVAL. Skip these and continue.
          if (e?.errno === 28) {
            continue;
          }
          throw e;
        }
        id = child.id;
        type = FS.isChrdev(child.mode) ? 2 : // DT_CHR, character device.
               FS.isDir(child.mode) ? 4 : // DT_DIR, directory.
               FS.isLink(child.mode) ? 10 : // DT_LNK, symbolic link.
               8; // DT_REG, regular file.
      }
      // d_ino, d_off (offset of the *next* record), d_reclen, d_type, d_name
      HEAP64[((dirp + pos)>>3)] = BigInt(id);
      HEAP64[(((dirp + pos)+(8))>>3)] = BigInt((idx + 1) * struct_size);
      HEAP16[(((dirp + pos)+(16))>>1)] = 280;
      HEAP8[(dirp + pos)+(18)] = type;
      stringToUTF8(name, dirp + pos + 19, 256);
      pos += struct_size;
    }
    // Persist the read position so the next call resumes after `endIdx`.
    FS.llseek(stream, idx * struct_size, 0); // whence 0: absolute seek
    return pos;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
+
// ioctl(2) for tty streams. `op` codes are Linux tty ioctl request numbers
// (21505 = TCGETS matches the termios read below; 21523 = TIOCGWINSZ
// matches the winsize write; other constant names TODO confirm).
// Returns 0 on success, -59 when the stream is not a tty, -28 when the
// request is unsupported.
function ___syscall_ioctl(fd, op, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    switch (op) {
      case 21509: { // tty required, nothing to do
        if (!stream.tty) return -59;
        return 0;
      }
      case 21505: { // TCGETS: copy the termios struct out to userspace
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tcgets) {
          var termios = stream.tty.ops.ioctl_tcgets(stream);
          var argp = syscallGetVarargP();
          HEAP32[((argp)>>2)] = termios.c_iflag || 0;
          HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0;
          HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0;
          HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0;
          // 32 control-character bytes follow the four flag words
          for (var i = 0; i < 32; i++) {
            HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0;
          }
          return 0;
        }
        return 0;
      }
      case 21510:
      case 21511:
      case 21512: {
        if (!stream.tty) return -59;
        return 0; // no-op, not actually adjusting terminal settings
      }
      case 21506:
      case 21507:
      case 21508: { // TCSETS family: read the termios struct and apply it
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tcsets) {
          var argp = syscallGetVarargP();
          var c_iflag = HEAP32[((argp)>>2)];
          var c_oflag = HEAP32[(((argp)+(4))>>2)];
          var c_cflag = HEAP32[(((argp)+(8))>>2)];
          var c_lflag = HEAP32[(((argp)+(12))>>2)];
          var c_cc = []
          for (var i = 0; i < 32; i++) {
            c_cc.push(HEAP8[(argp + i)+(17)]);
          }
          return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc });
        }
        return 0; // no-op, not actually adjusting terminal settings
      }
      case 21519: { // reports process group 0
        if (!stream.tty) return -59;
        var argp = syscallGetVarargP();
        HEAP32[((argp)>>2)] = 0;
        return 0;
      }
      case 21520: {
        if (!stream.tty) return -59;
        return -28; // not supported
      }
      case 21531: { // delegated to the stream's own ioctl handler
        var argp = syscallGetVarargP();
        return FS.ioctl(stream, op, argp);
      }
      case 21523: { // TIOCGWINSZ: report terminal window size
        // TODO: in theory we should write to the winsize struct that gets
        // passed in, but for now musl doesn't read anything on it
        if (!stream.tty) return -59;
        if (stream.tty.ops.ioctl_tiocgwinsz) {
          var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty);
          var argp = syscallGetVarargP();
          HEAP16[((argp)>>1)] = winsize[0];
          HEAP16[(((argp)+(2))>>1)] = winsize[1];
        }
        return 0;
      }
      case 21524: {
        // TODO: technically, this ioctl call should change the window size.
        // but, since emscripten doesn't have any concept of a terminal window
        // yet, we'll just silently throw it away as we do TIOCGWINSZ
        if (!stream.tty) return -59;
        return 0;
      }
      case 21515: {
        if (!stream.tty) return -59;
        return 0;
      }
      default: return -28; // not supported
    }
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// lstat(2): stat `path` without following a final symlink; results are
// written into `buf`. Returns 0 or a negative errno.
function ___syscall_lstat64(path, buf) {
  try {
    var pathname = SYSCALLS.getStr(path);
    return SYSCALLS.writeStat(buf, FS.lstat(pathname));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// fstatat(2): stat `path` relative to directory fd `dirfd`.
// flag 256 (0x100): don't follow a final symlink (use lstat);
// flag 4096 (0x1000): an empty path names `dirfd` itself;
// the ~6400 mask clears those two plus 2048 (0x800).
function ___syscall_newfstatat(dirfd, path, buf, flags) {
  try {

    path = SYSCALLS.getStr(path);
    var nofollow = flags & 256;
    var allowEmpty = flags & 4096;
    flags = flags & (~6400); // 6400 = 256 | 2048 | 4096
    path = SYSCALLS.calculateAt(dirfd, path, allowEmpty);
    return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
+
// openat(2): open `path` resolved relative to directory fd `dirfd` with
// `flags`; the optional creation mode is read from the varargs when
// present. Returns the new fd or a negative errno.
function ___syscall_openat(dirfd, path, flags, varargs) {
  SYSCALLS.varargs = varargs;
  try {

    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    var mode = varargs ? syscallGetVarargI() : 0;
    return FS.open(path, flags, mode).fd;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// stat(2): stat `path` (following symlinks) and write the result into
// `buf`. Returns 0 or a negative errno.
function ___syscall_stat64(path, buf) {
  try {
    var pathname = SYSCALLS.getStr(path);
    return SYSCALLS.writeStat(buf, FS.stat(pathname));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
+
// Wasm import: terminate execution via the runtime's abort() with an
// empty message.
var __abort_js = () =>
  abort('');
+
+
// Bounds of the range a JS double can represent exactly (+-2^53).
var INT53_MAX = 9007199254740992;

var INT53_MIN = -9007199254740992;
// Convert a BigInt to a Number, yielding NaN when it falls outside the
// exactly-representable 53-bit range.
var bigintToI53Checked = (num) => {
  if (num < INT53_MIN || num > INT53_MAX) {
    return NaN;
  }
  return Number(num);
};
// Wasm import called on munmap: if the mapping was writable (prot & 2,
// PROT_WRITE), sync the region back to the backing file first. Errors map
// to a negative errno; success falls through with no explicit return.
function __munmap_js(addr, len, prot, flags, fd, offset) {
  // i64 offset arrives as BigInt; NaN if outside the 53-bit safe range.
  offset = bigintToI53Checked(offset);


  try {

    var stream = SYSCALLS.getStreamFromFD(fd);
    if (prot & 2) {
      SYSCALLS.doMsync(addr, stream, len, flags, offset);
    }
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
  ;
}
+
// Wasm import backing tzset(): fill the C-side `timezone` (seconds west of
// UTC) and `daylight` variables, and write the standard/DST zone names
// (as "UTC+hhmm" strings, truncated to 16 chars + NUL) into the two
// 17-byte name buffers.
var __tzset_js = (timezone, daylight, std_name, dst_name) => {
  // TODO: Use (malleable) environment variables instead of system settings.
  var currentYear = new Date().getFullYear();
  var winter = new Date(currentYear, 0, 1);
  var summer = new Date(currentYear, 6, 1);
  var winterOffset = winter.getTimezoneOffset();
  var summerOffset = summer.getTimezoneOffset();

  // Local standard timezone offset. Local standard time is not adjusted for
  // daylight savings. This code uses the fact that getTimezoneOffset returns
  // a greater value during Standard Time versus Daylight Saving Time (DST).
  // Thus it determines the expected output during Standard Time, and it
  // compares whether the output of the given date the same (Standard) or less
  // (DST).
  var stdTimezoneOffset = Math.max(winterOffset, summerOffset);

  // timezone is specified as seconds west of UTC ("The external variable
  // `timezone` shall be set to the difference, in seconds, between
  // Coordinated Universal Time (UTC) and local standard time."), the same
  // as returned by stdTimezoneOffset.
  // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html
  HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60;

  HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset);

  // Format a minutes-west offset as a "UTC+hhmm"/"UTC-hhmm" zone name.
  var extractZone = (timezoneOffset) => {
    // Why inverse sign?
    // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset
    var sign = timezoneOffset >= 0 ? "-" : "+";

    var absOffset = Math.abs(timezoneOffset)
    var hours = String(Math.floor(absOffset / 60)).padStart(2, "0");
    var minutes = String(absOffset % 60).padStart(2, "0");

    return `UTC${sign}${hours}${minutes}`;
  }

  var winterName = extractZone(winterOffset);
  var summerName = extractZone(summerOffset);
  // Whichever season has the smaller offset is the DST name.
  if (summerOffset < winterOffset) {
    // Northern hemisphere
    stringToUTF8(winterName, std_name, 17);
    stringToUTF8(summerName, dst_name, 17);
  } else {
    stringToUTF8(winterName, dst_name, 17);
    stringToUTF8(summerName, std_name, 17);
  }
};
+
// Clock sources used by the wasi shims below (both report milliseconds).
var _emscripten_get_now = () => performance.now(); // monotonic timer
var _emscripten_date_now = () => Date.now();       // wall-clock time

// performance.now() is monotonic in all supported environments.
var nowIsMonotonic = 1;

// Wasi defines clock ids 0..3 (realtime, monotonic, and the two
// cputime clocks); anything else is invalid.
var checkWasiClock = (clock_id) => clock_id >= 0 && clock_id <= 3;
+
// wasi clock_time_get: write the current time of clock `clk_id` as an i64
// nanosecond count to `ptime`. Clock 0 (realtime) uses the wall clock;
// the remaining wasi clocks are served by the monotonic timer.
// Returns 0, or a wasi errno (28 for an invalid clock id).
function _clock_time_get(clk_id, ignored_precision, ptime) {
  // i64 precision argument is accepted but unused.
  ignored_precision = bigintToI53Checked(ignored_precision);


  if (!checkWasiClock(clk_id)) {
    return 28;
  }
  var now;
  // all wasi clocks but realtime are monotonic
  if (clk_id === 0) {
    now = _emscripten_date_now();
  } else if (nowIsMonotonic) {
    now = _emscripten_get_now();
  } else {
    return 52; // no monotonic source available
  }
  // "now" is in ms, and wasi times are in ns.
  var nsec = Math.round(now * 1000 * 1000);
  HEAP64[((ptime)>>3)] = BigInt(nsec);
  return 0;
  ;
}
+
+
// Upper bound the wasm heap is allowed to grow to. This build caps memory
// at 2048MB (2147483648 bytes); staying below 4GB also avoids sizes that
// would wrap to 0 in 32-bit heap-size arithmetic on the wasm side.
var getHeapMax = () => 2147483648;
+
+
// Try to grow the wasm memory to at least `size` bytes. On success the
// HEAP* views are refreshed and 1 is returned; if grow() throws (out of
// memory) the function falls through and implicitly returns undefined.
var growMemory = (size) => {
  var b = wasmMemory.buffer;
  // Convert the byte delta into a page count, rounding up.
  var pages = ((size - b.byteLength + 65535) / 65536) | 0;
  try {
    // round size grow request up to wasm page size (fixed 64KB per spec)
    wasmMemory.grow(pages); // .grow() takes a delta compared to the previous size
    updateMemoryViews();
    return 1 /*success*/;
  } catch(e) {
  }
  // implicit 0 return to save code size (caller will cast "undefined" into 0
  // anyhow)
};
// Wasm import called when the C allocator needs more memory. Attempts
// progressively less aggressive over-allocation (rules below) and returns
// true on success, false when the request cannot be satisfied.
var _emscripten_resize_heap = (requestedSize) => {
  var oldSize = HEAPU8.length;
  // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
  requestedSize >>>= 0;
  // With multithreaded builds, races can happen (another thread might increase the size
  // in between), so return a failure, and let the caller retry.

  // Memory resize rules:
  // 1. Always increase heap size to at least the requested size, rounded up
  //    to next page multiple.
  // 2a. If MEMORY_GROWTH_LINEAR_STEP == -1, excessively resize the heap
  //     geometrically: increase the heap size according to
  //     MEMORY_GROWTH_GEOMETRIC_STEP factor (default +20%), At most
  //     overreserve by MEMORY_GROWTH_GEOMETRIC_CAP bytes (default 96MB).
  // 2b. If MEMORY_GROWTH_LINEAR_STEP != -1, excessively resize the heap
  //     linearly: increase the heap size by at least
  //     MEMORY_GROWTH_LINEAR_STEP bytes.
  // 3. Max size for the heap is capped at 2048MB-WASM_PAGE_SIZE, or by
  //    MAXIMUM_MEMORY, or by ASAN limit, depending on which is smallest
  // 4. If we were unable to allocate as much memory, it may be due to
  //    over-eager decision to excessively reserve due to (3) above.
  //    Hence if an allocation fails, cut down on the amount of excess
  //    growth, in an attempt to succeed to perform a smaller allocation.

  // A limit is set for how much we can grow. We should not exceed that
  // (the wasm binary specifies it, so if we tried, we'd fail anyhow).
  var maxHeapSize = getHeapMax();
  if (requestedSize > maxHeapSize) {
    return false;
  }

  // Loop through potential heap size increases. If we attempt a too eager
  // reservation that fails, cut down on the attempted size and reserve a
  // smaller bump instead. (max 3 times, chosen somewhat arbitrarily)
  for (var cutDown = 1; cutDown <= 4; cutDown *= 2) {
    var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown); // ensure geometric growth
    // but limit overreserving (default to capping at +96MB overgrowth at most)
    overGrownHeapSize = Math.min(overGrownHeapSize, requestedSize + 100663296 );

    // Round up to the wasm page size and clamp to the hard cap.
    var newSize = Math.min(maxHeapSize, alignMemory(Math.max(requestedSize, overGrownHeapSize), 65536));

    var replacement = growMemory(newSize);
    if (replacement) {

      return true;
    }
  }
  return false;
};
+
// User-overridable additions to the emulated process environment; an entry
// explicitly set to undefined removes the corresponding default (see
// getEnvStrings).
var ENV = {
};

// argv[0] for the emulated program; `thisProgram` is a module-level global
// defined elsewhere in this file.
var getExecutableName = () => thisProgram || './this.program';
// Build (once) and cache the "KEY=value" strings of the emulated process
// environment: browser-flavored defaults overlaid with user-provided ENV
// entries. The cache lives as a property on the function itself.
var getEnvStrings = () => {
  if (!getEnvStrings.strings) {
    // Default values.
    // Browser language detection #8751
    var lang = ((typeof navigator == 'object' && navigator.languages && navigator.languages[0]) || 'C').replace('-', '_') + '.UTF-8';
    var env = {
      'USER': 'web_user',
      'LOGNAME': 'web_user',
      'PATH': '/',
      'PWD': '/',
      'HOME': '/home/web_user',
      'LANG': lang,
      '_': getExecutableName()
    };
    // Apply the user-provided values, if any.
    for (var x in ENV) {
      // x is a key in ENV; if ENV[x] is undefined, that means it was
      // explicitly set to be so. We allow user code to do that to
      // force variables with default values to remain unset.
      if (ENV[x] === undefined) delete env[x];
      else env[x] = ENV[x];
    }
    var strings = [];
    for (var x in env) {
      strings.push(`${x}=${env[x]}`);
    }
    getEnvStrings.strings = strings;
  }
  return getEnvStrings.strings;
};
+
// Write `str` into the heap at `buffer`, one byte per character (the
// caller must ensure the content is single-byte safe), then append a NUL
// terminator.
var stringToAscii = (str, buffer) => {
  var i = 0;
  while (i < str.length) {
    HEAP8[buffer++] = str.charCodeAt(i++);
  }
  // Null-terminate the string
  HEAP8[buffer] = 0;
};
// wasi environ_get: write a pointer for each environment string into the
// `__environ` pointer array and the NUL-terminated strings themselves into
// `environ_buf` (buffer sizes were reported by _environ_sizes_get).
var _environ_get = (__environ, environ_buf) => {
  var bufSize = 0;
  getEnvStrings().forEach((string, i) => {
    var ptr = environ_buf + bufSize;
    // Pointer table entry i, then the string bytes at `ptr`.
    HEAPU32[(((__environ)+(i*4))>>2)] = ptr;
    stringToAscii(string, ptr);
    bufSize += string.length + 1; // +1 for the NUL terminator
  });
  return 0;
};
+
// wasi environ_sizes_get: report the number of environment strings and the
// total byte count (including NUL terminators) needed to store them.
var _environ_sizes_get = (penviron_count, penviron_buf_size) => {
  var strings = getEnvStrings();
  HEAPU32[((penviron_count)>>2)] = strings.length;
  var total = strings.reduce((acc, s) => acc + s.length + 1, 0);
  HEAPU32[((penviron_buf_size)>>2)] = total;
  return 0;
};
+
// wasi fd_close: close the stream behind `fd`. Returns 0 or a wasi errno.
function _fd_close(fd) {
  try {
    FS.close(SYSCALLS.getStreamFromFD(fd));
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}
+
// Scatter-read into `iovcnt` iovecs starting at `iov` (each iovec is a
// {u32 ptr, u32 len} pair in the heap). When `offset` is given this is a
// positioned (pread-style) read and the offset is advanced manually.
// Returns total bytes read, or -1 if any read fails.
/** @param {number=} offset */
var doReadv = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8; // advance to the next iovec
    var curr = FS.read(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) break; // nothing more to read
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};
+
// wasi fd_read: scatter-read from `fd` into the iovec array at `iov`,
// storing the total byte count at `pnum`. Returns 0 or a wasi errno.
function _fd_read(fd, iov, iovcnt, pnum) {
  try {
    var nread = doReadv(SYSCALLS.getStreamFromFD(fd), iov, iovcnt);
    HEAPU32[((pnum)>>2)] = nread;
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}
+
+
// wasi fd_seek: seek `fd` and write the resulting (i64) position to
// `newOffset`. Rewinding to absolute offset 0 also drops any cached
// directory listing so a subsequent readdir starts fresh.
function _fd_seek(fd, offset, whence, newOffset) {
  // i64 offset arrives as BigInt; NaN if not exactly representable.
  offset = bigintToI53Checked(offset);


  try {

    if (isNaN(offset)) return 61; // offset outside the 53-bit safe range
    var stream = SYSCALLS.getStreamFromFD(fd);
    FS.llseek(stream, offset, whence);
    HEAP64[((newOffset)>>3)] = BigInt(stream.position);
    if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
  ;
}
+
// Gather-write `iovcnt` iovecs starting at `iov` (each a {u32 ptr, u32 len}
// pair in the heap). When `offset` is given this is a positioned
// (pwrite-style) write and the offset is advanced manually.
// Returns total bytes written, or -1 if any write fails.
/** @param {number=} offset */
var doWritev = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8; // advance to the next iovec
    var curr = FS.write(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) {
      // No more space to write.
      break;
    }
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};
+
// wasi fd_write: gather-write the iovec array at `iov` to `fd`, storing
// the total byte count at `pnum`. Returns 0 or a wasi errno.
function _fd_write(fd, iov, iovcnt, pnum) {
  try {
    var written = doWritev(SYSCALLS.getStreamFromFD(fd), iov, iovcnt);
    HEAPU32[((pnum)>>2)] = written;
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}
+
// C++ exception handling: the type-info pointer itself serves as the id.
var _llvm_eh_typeid_for = (type) => {
  return type;
};
+
// JS-side cache of wasm indirect-call table entries, indexed by function
// pointer, so repeated lookups avoid calling WebAssembly.Table.get again.
var wasmTableMirror = [];

// The instantiated module's function table (assigned after instantiation).
/** @type {WebAssembly.Table} */
var wasmTable;
// Return the function stored at index `funcPtr` of the wasm indirect-call
// table, memoizing results in wasmTableMirror.
var getWasmTableEntry = (funcPtr) => {
  var cached = wasmTableMirror[funcPtr];
  if (cached) {
    return cached;
  }
  if (funcPtr >= wasmTableMirror.length) {
    wasmTableMirror.length = funcPtr + 1;
  }
  /** @suppress {checkTypes} */
  var entry = wasmTable.get(funcPtr);
  wasmTableMirror[funcPtr] = entry;
  return entry;
};
+
// Look up an exported C function by name; exports are attached to Module
// with a leading underscore (e.g. Module['_malloc'] for "malloc").
var getCFunc = (ident) => {
  var func = Module['_' + ident]; // closure exported function
  return func;
};
+
// Copy a JS array (or typed array) of byte values into the heap at `buffer`.
var writeArrayToMemory = (array, buffer) => {
  HEAP8.set(array, buffer);
};
+
+
+
// Allocate `sz` bytes on the wasm shadow stack (released via stackRestore).
var stackAlloc = (sz) => __emscripten_stack_alloc(sz);
// Copy `str` onto the shadow stack as NUL-terminated UTF-8 and return its
// address; the caller owns freeing via a saved stack pointer.
var stringToUTF8OnStack = (str) => {
  var size = lengthBytesUTF8(str) + 1; // +1 for the NUL terminator
  var ret = stackAlloc(size);
  stringToUTF8(str, ret, size);
  return ret;
};
+
+
+
+
+
/**
 * Call an exported C function by name, converting arguments and the return
 * value according to the declared type names. 'string' and 'array'
 * arguments are copied onto the wasm stack for the duration of the call;
 * a 'string' return is decoded from UTF-8 and a 'boolean' return is
 * coerced with Boolean(); everything else passes through unchanged.
 *
 * @param {string|null=} returnType
 * @param {Array=} argTypes
 * @param {Arguments|Array=} args
 * @param {Object=} opts
 */
var ccall = (ident, returnType, argTypes, args, opts) => {
  // For fast lookup of conversion functions
  var toC = {
    'string': (str) => {
      var ret = 0;
      if (str !== null && str !== undefined && str !== 0) { // null string
        ret = stringToUTF8OnStack(str);
      }
      return ret;
    },
    'array': (arr) => {
      var ret = stackAlloc(arr.length);
      writeArrayToMemory(arr, ret);
      return ret;
    }
  };

  function convertReturnValue(ret) {
    if (returnType === 'string') {
      return UTF8ToString(ret);
    }
    if (returnType === 'boolean') return Boolean(ret);
    return ret;
  }

  var func = getCFunc(ident);
  var cArgs = [];
  var stack = 0; // saved stack pointer; 0 while nothing is stack-allocated
  if (args) {
    for (var i = 0; i < args.length; i++) {
      var converter = toC[argTypes[i]];
      if (converter) {
        // Save the stack lazily, only when the first conversion allocates.
        if (stack === 0) stack = stackSave();
        cArgs[i] = converter(args[i]);
      } else {
        cArgs[i] = args[i];
      }
    }
  }
  var ret = func(...cArgs);
  function onDone(ret) {
    // Release stack-allocated temporaries before converting the result.
    if (stack !== 0) stackRestore(stack);
    return convertReturnValue(ret);
  }

  ret = onDone(ret);
  return ret;
};
+
+
+
/**
 * Wrap an exported C function as a plain JS function.
 * @param {string=} returnType
 * @param {Array=} argTypes
 * @param {Object=} opts
 */
var cwrap = (ident, returnType, argTypes, opts) => {
  // Numbers and booleans need no conversion, so when every declared type
  // is one of those (and there are no options) the raw export can be
  // handed back directly without the ccall shim.
  var numericArgs = !argTypes || argTypes.every((type) => type === 'number' || type === 'boolean');
  var numericRet = returnType !== 'string';
  return (numericRet && numericArgs && !opts)
    ? getCFunc(ident)
    : (...args) => ccall(ident, returnType, argTypes, args, opts);
};
+
// One-time filesystem setup run at module load.
FS.createPreloadedFile = FS_createPreloadedFile;
FS.staticInit();
// Set module methods based on EXPORTED_RUNTIME_METHODS
;

// This error may happen quite a bit. To avoid overhead we reuse it (and
// suffer a lack of stack info).
MEMFS.doesNotExistError = new FS.ErrnoError(44); // 44: "no such entity"
/** @suppress {checkTypes} */
MEMFS.doesNotExistError.stack = '';
;
// The JS functions handed to the wasm module at instantiation. Keys must
// match the import names the compiled module expects. The invoke_* entries
// are exception-handling trampolines defined elsewhere in this file, one
// per call signature (the suffix letters presumably encode the signature;
// TODO confirm against the emscripten naming scheme).
var wasmImports = {
  /** @export */
  __assert_fail: ___assert_fail,
  /** @export */
  __cxa_begin_catch: ___cxa_begin_catch,
  /** @export */
  __cxa_end_catch: ___cxa_end_catch,
  /** @export */
  __cxa_find_matching_catch_2: ___cxa_find_matching_catch_2,
  /** @export */
  __cxa_find_matching_catch_3: ___cxa_find_matching_catch_3,
  /** @export */
  __cxa_rethrow: ___cxa_rethrow,
  /** @export */
  __cxa_throw: ___cxa_throw,
  /** @export */
  __cxa_uncaught_exceptions: ___cxa_uncaught_exceptions,
  /** @export */
  __resumeException: ___resumeException,
  /** @export */
  __syscall_fcntl64: ___syscall_fcntl64,
  /** @export */
  __syscall_fstat64: ___syscall_fstat64,
  /** @export */
  __syscall_getdents64: ___syscall_getdents64,
  /** @export */
  __syscall_ioctl: ___syscall_ioctl,
  /** @export */
  __syscall_lstat64: ___syscall_lstat64,
  /** @export */
  __syscall_newfstatat: ___syscall_newfstatat,
  /** @export */
  __syscall_openat: ___syscall_openat,
  /** @export */
  __syscall_stat64: ___syscall_stat64,
  /** @export */
  _abort_js: __abort_js,
  /** @export */
  _munmap_js: __munmap_js,
  /** @export */
  _tzset_js: __tzset_js,
  /** @export */
  clock_time_get: _clock_time_get,
  /** @export */
  emscripten_date_now: _emscripten_date_now,
  /** @export */
  emscripten_resize_heap: _emscripten_resize_heap,
  /** @export */
  environ_get: _environ_get,
  /** @export */
  environ_sizes_get: _environ_sizes_get,
  /** @export */
  fd_close: _fd_close,
  /** @export */
  fd_read: _fd_read,
  /** @export */
  fd_seek: _fd_seek,
  /** @export */
  fd_write: _fd_write,
  /** @export */
  invoke_diii,
  /** @export */
  invoke_fiii,
  /** @export */
  invoke_i,
  /** @export */
  invoke_ii,
  /** @export */
  invoke_iii,
  /** @export */
  invoke_iiii,
  /** @export */
  invoke_iiiii,
  /** @export */
  invoke_iiiiii,
  /** @export */
  invoke_iiiiiii,
  /** @export */
  invoke_iiiiiiii,
  /** @export */
  invoke_iiiiiiiii,
  /** @export */
  invoke_iiiiiiiiiii,
  /** @export */
  invoke_iiiiiiiiiiii,
  /** @export */
  invoke_iiiiiiiiiiiii,
  /** @export */
  invoke_iiiiiiijji,
  /** @export */
  invoke_iiiiij,
  /** @export */
  invoke_iiij,
  /** @export */
  invoke_iij,
  /** @export */
  invoke_ijiii,
  /** @export */
  invoke_jii,
  /** @export */
  invoke_jiiii,
  /** @export */
  invoke_v,
  /** @export */
  invoke_vi,
  /** @export */
  invoke_vii,
  /** @export */
  invoke_viii,
  /** @export */
  invoke_viiii,
  /** @export */
  invoke_viiiii,
  /** @export */
  invoke_viiiiii,
  /** @export */
  invoke_viiiiiii,
  /** @export */
  invoke_viiiiiiii,
  /** @export */
  invoke_viiiiiiiiii,
  /** @export */
  invoke_viiiiiiiiiiiiii,
  /** @export */
  invoke_viiiiiiiiiiiiiii,
  /** @export */
  invoke_viiij,
  /** @export */
  invoke_viij,
  /** @export */
  invoke_vij,
  /** @export */
  llvm_eh_typeid_for: _llvm_eh_typeid_for
};
+var wasmExports = await createWasm();
+var ___wasm_call_ctors = wasmExports['__wasm_call_ctors']
+var _malloc = Module['_malloc'] = wasmExports['malloc']
+var _ntohs = wasmExports['ntohs']
+var _free = Module['_free'] = wasmExports['free']
+var ___cxa_free_exception = wasmExports['__cxa_free_exception']
+var _js_createSpendKeyData = Module['_js_createSpendKeyData'] = wasmExports['js_createSpendKeyData']
+var _js_createSpendKey = Module['_js_createSpendKey'] = wasmExports['js_createSpendKey']
+var _js_getSpendKey_s1 = Module['_js_getSpendKey_s1'] = wasmExports['js_getSpendKey_s1']
+var _js_getSpendKey_s2 = Module['_js_getSpendKey_s2'] = wasmExports['js_getSpendKey_s2']
+var _js_getSpendKey_r = Module['_js_getSpendKey_r'] = wasmExports['js_getSpendKey_r']
+var _js_getSpendKey_s1_hex = Module['_js_getSpendKey_s1_hex'] = wasmExports['js_getSpendKey_s1_hex']
+var _js_getSpendKey_s2_hex = Module['_js_getSpendKey_s2_hex'] = wasmExports['js_getSpendKey_s2_hex']
+var _js_getSpendKey_r_hex = Module['_js_getSpendKey_r_hex'] = wasmExports['js_getSpendKey_r_hex']
+var _js_createFullViewKey = Module['_js_createFullViewKey'] = wasmExports['js_createFullViewKey']
+var _js_createIncomingViewKey = Module['_js_createIncomingViewKey'] = wasmExports['js_createIncomingViewKey']
+var _js_getAddress = Module['_js_getAddress'] = wasmExports['js_getAddress']
+var _js_encodeAddress = Module['_js_encodeAddress'] = wasmExports['js_encodeAddress']
+var _js_isValidSparkAddress = Module['_js_isValidSparkAddress'] = wasmExports['js_isValidSparkAddress']
+var _js_decodeAddress = Module['_js_decodeAddress'] = wasmExports['js_decodeAddress']
+var _js_createMintedCoinData = Module['_js_createMintedCoinData'] = wasmExports['js_createMintedCoinData']
+var _js_createSparkMintRecipients = Module['_js_createSparkMintRecipients'] = wasmExports['js_createSparkMintRecipients']
+var _js_getRecipientVectorLength = Module['_js_getRecipientVectorLength'] = wasmExports['js_getRecipientVectorLength']
+var _js_getRecipientAt = Module['_js_getRecipientAt'] = wasmExports['js_getRecipientAt']
+var _js_getRecipientScriptPubKey = Module['_js_getRecipientScriptPubKey'] = wasmExports['js_getRecipientScriptPubKey']
+var _js_getRecipientScriptPubKeySize = Module['_js_getRecipientScriptPubKeySize'] = wasmExports['js_getRecipientScriptPubKeySize']
+var _js_getRecipientAmount = Module['_js_getRecipientAmount'] = wasmExports['js_getRecipientAmount']
+var _js_getRecipientSubtractFeeFromAmountFlag = Module['_js_getRecipientSubtractFeeFromAmountFlag'] = wasmExports['js_getRecipientSubtractFeeFromAmountFlag']
+var _js_deserializeCoin = Module['_js_deserializeCoin'] = wasmExports['js_deserializeCoin']
+var _js_getCoinFromMeta = Module['_js_getCoinFromMeta'] = wasmExports['js_getCoinFromMeta']
+var _js_getMetadata = Module['_js_getMetadata'] = wasmExports['js_getMetadata']
+var _js_getInputData = Module['_js_getInputData'] = wasmExports['js_getInputData']
+var _js_getInputDataWithMeta = Module['_js_getInputDataWithMeta'] = wasmExports['js_getInputDataWithMeta']
+var _js_identifyCoin = Module['_js_identifyCoin'] = wasmExports['js_identifyCoin']
+var _js_getIdentifiedCoinDiversifier = Module['_js_getIdentifiedCoinDiversifier'] = wasmExports['js_getIdentifiedCoinDiversifier']
+var _js_getIdentifiedCoinValue = Module['_js_getIdentifiedCoinValue'] = wasmExports['js_getIdentifiedCoinValue']
+var _js_getIdentifiedCoinMemo = Module['_js_getIdentifiedCoinMemo'] = wasmExports['js_getIdentifiedCoinMemo']
+var _js_getCSparkMintMetaHeight = Module['_js_getCSparkMintMetaHeight'] = wasmExports['js_getCSparkMintMetaHeight']
+var _js_getCSparkMintMetaId = Module['_js_getCSparkMintMetaId'] = wasmExports['js_getCSparkMintMetaId']
+var _js_getCSparkMintMetaIsUsed = Module['_js_getCSparkMintMetaIsUsed'] = wasmExports['js_getCSparkMintMetaIsUsed']
+var _js_getCSparkMintMetaMemo = Module['_js_getCSparkMintMetaMemo'] = wasmExports['js_getCSparkMintMetaMemo']
+var _js_getCSparkMintMetaNonce = Module['_js_getCSparkMintMetaNonce'] = wasmExports['js_getCSparkMintMetaNonce']
+var _js_getCSparkMintMetaDiversifier = Module['_js_getCSparkMintMetaDiversifier'] = wasmExports['js_getCSparkMintMetaDiversifier']
+var _js_getCSparkMintMetaValue = Module['_js_getCSparkMintMetaValue'] = wasmExports['js_getCSparkMintMetaValue']
+var _js_getCSparkMintMetaType = Module['_js_getCSparkMintMetaType'] = wasmExports['js_getCSparkMintMetaType']
+var _js_getCSparkMintMetaCoin = Module['_js_getCSparkMintMetaCoin'] = wasmExports['js_getCSparkMintMetaCoin']
+var _js_setCSparkMintMetaId = Module['_js_setCSparkMintMetaId'] = wasmExports['js_setCSparkMintMetaId']
+var _js_setCSparkMintMetaHeight = Module['_js_setCSparkMintMetaHeight'] = wasmExports['js_setCSparkMintMetaHeight']
+var _js_getCoinHash = Module['_js_getCoinHash'] = wasmExports['js_getCoinHash']
+var _js_getInputCoinDataCoverSetId = Module['_js_getInputCoinDataCoverSetId'] = wasmExports['js_getInputCoinDataCoverSetId']
+var _js_getInputCoinDataIndex = Module['_js_getInputCoinDataIndex'] = wasmExports['js_getInputCoinDataIndex']
+var _js_getInputCoinDataValue = Module['_js_getInputCoinDataValue'] = wasmExports['js_getInputCoinDataValue']
+var _js_getInputCoinDataTag_hex = Module['_js_getInputCoinDataTag_hex'] = wasmExports['js_getInputCoinDataTag_hex']
+var _js_getInputCoinDataTag_base64 = Module['_js_getInputCoinDataTag_base64'] = wasmExports['js_getInputCoinDataTag_base64']
+var _js_createRecipientsVectorForCreateSparkSpendTransaction = Module['_js_createRecipientsVectorForCreateSparkSpendTransaction'] = wasmExports['js_createRecipientsVectorForCreateSparkSpendTransaction']
+var _js_addRecipientForCreateSparkSpendTransaction = Module['_js_addRecipientForCreateSparkSpendTransaction'] = wasmExports['js_addRecipientForCreateSparkSpendTransaction']
+var _js_createPrivateRecipientsVectorForCreateSparkSpendTransaction = Module['_js_createPrivateRecipientsVectorForCreateSparkSpendTransaction'] = wasmExports['js_createPrivateRecipientsVectorForCreateSparkSpendTransaction']
+var _js_addPrivateRecipientForCreateSparkSpendTransaction = Module['_js_addPrivateRecipientForCreateSparkSpendTransaction'] = wasmExports['js_addPrivateRecipientForCreateSparkSpendTransaction']
+var _js_createCoinsListForCreateSparkSpendTransaction = Module['_js_createCoinsListForCreateSparkSpendTransaction'] = wasmExports['js_createCoinsListForCreateSparkSpendTransaction']
+var _js_addCoinToListForCreateSparkSpendTransaction = Module['_js_addCoinToListForCreateSparkSpendTransaction'] = wasmExports['js_addCoinToListForCreateSparkSpendTransaction']
+var _js_createCoverSetData = Module['_js_createCoverSetData'] = wasmExports['js_createCoverSetData']
+var _js_addCoinToCoverSetData = Module['_js_addCoinToCoverSetData'] = wasmExports['js_addCoinToCoverSetData']
+var _js_createCoverSetDataMapForCreateSparkSpendTransaction = Module['_js_createCoverSetDataMapForCreateSparkSpendTransaction'] = wasmExports['js_createCoverSetDataMapForCreateSparkSpendTransaction']
+var _js_addCoverSetDataForCreateSparkSpendTransaction = Module['_js_addCoverSetDataForCreateSparkSpendTransaction'] = wasmExports['js_addCoverSetDataForCreateSparkSpendTransaction']
+var _js_moveAddCoverSetDataForCreateSparkSpendTransaction = Module['_js_moveAddCoverSetDataForCreateSparkSpendTransaction'] = wasmExports['js_moveAddCoverSetDataForCreateSparkSpendTransaction']
+var _js_createIdAndBlockHashesMapForCreateSparkSpendTransaction = Module['_js_createIdAndBlockHashesMapForCreateSparkSpendTransaction'] = wasmExports['js_createIdAndBlockHashesMapForCreateSparkSpendTransaction']
+var _js_addIdAndBlockHashForCreateSparkSpendTransaction = Module['_js_addIdAndBlockHashForCreateSparkSpendTransaction'] = wasmExports['js_addIdAndBlockHashForCreateSparkSpendTransaction']
+var _js_createSparkSpendTransaction = Module['_js_createSparkSpendTransaction'] = wasmExports['js_createSparkSpendTransaction']
+var _js_getCreateSparkSpendTxResultSerializedSpend = Module['_js_getCreateSparkSpendTxResultSerializedSpend'] = wasmExports['js_getCreateSparkSpendTxResultSerializedSpend']
+var _js_getCreateSparkSpendTxResultSerializedSpendSize = Module['_js_getCreateSparkSpendTxResultSerializedSpendSize'] = wasmExports['js_getCreateSparkSpendTxResultSerializedSpendSize']
+var _js_getCreateSparkSpendTxResultOutputScriptsSize = Module['_js_getCreateSparkSpendTxResultOutputScriptsSize'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptsSize']
+var _js_getCreateSparkSpendTxResultOutputScriptAt = Module['_js_getCreateSparkSpendTxResultOutputScriptAt'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptAt']
+var _js_getCreateSparkSpendTxResultOutputScriptSizeAt = Module['_js_getCreateSparkSpendTxResultOutputScriptSizeAt'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptSizeAt']
+var _js_getCreateSparkSpendTxResultSpentCoinsSize = Module['_js_getCreateSparkSpendTxResultSpentCoinsSize'] = wasmExports['js_getCreateSparkSpendTxResultSpentCoinsSize']
+var _js_getCreateSparkSpendTxResultSpentCoinAt = Module['_js_getCreateSparkSpendTxResultSpentCoinAt'] = wasmExports['js_getCreateSparkSpendTxResultSpentCoinAt']
+var _js_getCreateSparkSpendTxResultFee = Module['_js_getCreateSparkSpendTxResultFee'] = wasmExports['js_getCreateSparkSpendTxResultFee']
+var _js_freeSpendKeyData = Module['_js_freeSpendKeyData'] = wasmExports['js_freeSpendKeyData']
+var _js_freeSpendKey = Module['_js_freeSpendKey'] = wasmExports['js_freeSpendKey']
+var _js_freeFullViewKey = Module['_js_freeFullViewKey'] = wasmExports['js_freeFullViewKey']
+var _js_freeIncomingViewKey = Module['_js_freeIncomingViewKey'] = wasmExports['js_freeIncomingViewKey']
+var _js_freeAddress = Module['_js_freeAddress'] = wasmExports['js_freeAddress']
+var _js_freeRecipientVector = Module['_js_freeRecipientVector'] = wasmExports['js_freeRecipientVector']
+var _js_freeCSparkMintMeta = Module['_js_freeCSparkMintMeta'] = wasmExports['js_freeCSparkMintMeta']
+var _js_freeInputCoinData = Module['_js_freeInputCoinData'] = wasmExports['js_freeInputCoinData']
+var _js_freeIdentifiedCoinData = Module['_js_freeIdentifiedCoinData'] = wasmExports['js_freeIdentifiedCoinData']
+var _js_freeCoin = Module['_js_freeCoin'] = wasmExports['js_freeCoin']
+var _js_freeSparkSpendRecipientsVector = Module['_js_freeSparkSpendRecipientsVector'] = wasmExports['js_freeSparkSpendRecipientsVector']
+var _js_freeSparkSpendPrivateRecipientsVector = Module['_js_freeSparkSpendPrivateRecipientsVector'] = wasmExports['js_freeSparkSpendPrivateRecipientsVector']
+var _js_freeSparkSpendCoinsList = Module['_js_freeSparkSpendCoinsList'] = wasmExports['js_freeSparkSpendCoinsList']
+var _js_freeCoverSetData = Module['_js_freeCoverSetData'] = wasmExports['js_freeCoverSetData']
+var _js_freeCoverSetDataMapForCreateSparkSpendTransaction = Module['_js_freeCoverSetDataMapForCreateSparkSpendTransaction'] = wasmExports['js_freeCoverSetDataMapForCreateSparkSpendTransaction']
+var _js_freeIdAndBlockHashesMap = Module['_js_freeIdAndBlockHashesMap'] = wasmExports['js_freeIdAndBlockHashesMap']
+var _js_freeCreateSparkSpendTxResult = Module['_js_freeCreateSparkSpendTxResult'] = wasmExports['js_freeCreateSparkSpendTxResult']
+var _htonl = wasmExports['htonl']
+var _htons = wasmExports['htons']
+var _emscripten_builtin_memalign = wasmExports['emscripten_builtin_memalign']
+var _setThrew = wasmExports['setThrew']
+var __emscripten_tempret_set = wasmExports['_emscripten_tempret_set']
+var __emscripten_stack_restore = wasmExports['_emscripten_stack_restore']
+var __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc']
+var _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current']
+var ___cxa_decrement_exception_refcount = wasmExports['__cxa_decrement_exception_refcount']
+var ___cxa_increment_exception_refcount = wasmExports['__cxa_increment_exception_refcount']
+var ___cxa_can_catch = wasmExports['__cxa_can_catch']
+var ___cxa_get_exception_ptr = wasmExports['__cxa_get_exception_ptr']
+
+function invoke_vii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_ii(index,a1) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_i(index) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)();
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiii(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_vi(index,a1) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iij(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiii(index,a1,a2,a3,a4,a5,a6,a7) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_v(index) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)();
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viij(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_jii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ return 0n;
+ }
+}
+
+function invoke_viiiii(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_ijiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiij(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_vij(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiii(index,a1,a2,a3,a4,a5,a6) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiijji(index,a1,a2,a3,a4,a5,a6,a7,a8,a9) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiii(index,a1,a2,a3,a4,a5,a6) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiij(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiij(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiii(index,a1,a2,a3,a4,a5,a6,a7) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_jiiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ return 0n;
+ }
+}
+
+function invoke_iiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_fiii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_diii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+
+// include: postamble.js
+// === Auto-generated postamble setup entry stuff ===
+
+Module['ccall'] = ccall;
+Module['cwrap'] = cwrap;
+
+
+function run() {
+
+ if (runDependencies > 0) {
+ dependenciesFulfilled = run;
+ return;
+ }
+
+ preRun();
+
+ // a preRun added a dependency, run will be called later
+ if (runDependencies > 0) {
+ dependenciesFulfilled = run;
+ return;
+ }
+
+ function doRun() {
+ // run may have just been called through dependencies being fulfilled just in this very frame,
+ // or while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ if (ABORT) return;
+
+ initRuntime();
+
+ readyPromiseResolve(Module);
+ Module['onRuntimeInitialized']?.();
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(() => {
+ setTimeout(() => Module['setStatus'](''), 1);
+ doRun();
+ }, 1);
+ } else
+ {
+ doRun();
+ }
+}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+run();
+
+// end include: postamble.js
+
+// include: postamble_modularize.js
+// In MODULARIZE mode we wrap the generated code in a factory function
+// and return either the Module itself, or a promise of the module.
+//
+// We assign to the `moduleRtn` global here and configure closure to see
+// this as and extern so it won't get minified.
+
+moduleRtn = readyPromise;
+
+// end include: postamble_modularize.js
+
+
+
+ return moduleRtn;
+}
+);
+})();
+export default Module;
diff --git a/packages/extension/src/libs/utils/wasmModule/spark.wasm b/packages/extension/src/libs/utils/wasmModule/spark.wasm
new file mode 100644
index 000000000..b45075cec
Binary files /dev/null and b/packages/extension/src/libs/utils/wasmModule/spark.wasm differ
diff --git a/packages/extension/src/libs/utils/wasmWorkerModule/spark.js b/packages/extension/src/libs/utils/wasmWorkerModule/spark.js
new file mode 100644
index 000000000..72d847a38
--- /dev/null
+++ b/packages/extension/src/libs/utils/wasmWorkerModule/spark.js
@@ -0,0 +1,4877 @@
+var Module = (() => {
+ var _scriptName = import.meta.url;
+
+ return (
+async function(moduleArg = {}) {
+ var moduleRtn;
+
+// include: shell.js
+// The Module object: Our interface to the outside world. We import
+// and export values on it. There are various ways Module can be used:
+// 1. Not defined. We create it here
+// 2. A function parameter, function(moduleArg) => Promise
+// 3. pre-run appended it, var Module = {}; ..generated code..
+// 4. External script tag defines var Module.
+// We need to check if Module already exists (e.g. case 3 above).
+// Substitution will be replaced with actual code on later stage of the build,
+// this way Closure Compiler will not mangle it (e.g. case 4. above).
+// Note that if you want to run closure, and also to use Module
+// after the generated code, you will need to define var Module = {};
+// before the code. Then that object will be used in the code, and you
+// can continue to use Module afterwards as well.
+var Module = moduleArg;
+
+// Set up the promise that indicates the Module is initialized
+var readyPromiseResolve, readyPromiseReject;
+var readyPromise = new Promise((resolve, reject) => {
+ readyPromiseResolve = resolve;
+ readyPromiseReject = reject;
+});
+
+// Determine the runtime environment we are in. You can customize this by
+// setting the ENVIRONMENT setting at compile time (see settings.js).
+
+// Attempt to auto-detect the environment
+var ENVIRONMENT_IS_WEB = typeof window == 'object';
+var ENVIRONMENT_IS_WORKER = typeof WorkerGlobalScope != 'undefined';
+// N.b. Electron.js environment is simultaneously a NODE-environment, but
+// also a web environment.
+var ENVIRONMENT_IS_NODE = typeof process == 'object' && typeof process.versions == 'object' && typeof process.versions.node == 'string' && process.type != 'renderer';
+var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+if (ENVIRONMENT_IS_NODE) {
+ // When building an ES module `require` is not normally available.
+ // We need to use `createRequire()` to construct the require()` function.
+ const { createRequire } = await import('module');
+ /** @suppress{duplicate} */
+ var require = createRequire('/');
+
+}
+
+// --pre-jses are emitted after the Module integration code, so that they can
+// refer to Module (if they choose; they can also define Module)
+
+
+// Sometimes an existing Module object exists with properties
+// meant to overwrite the default module functionality. Here
+// we collect those properties and reapply _after_ we configure
+// the current environment's defaults to avoid having to be so
+// defensive during initialization.
+var moduleOverrides = Object.assign({}, Module);
+
+var arguments_ = [];
+var thisProgram = './this.program';
+var quit_ = (status, toThrow) => {
+ throw toThrow;
+};
+
+// `/` should be present at the end if `scriptDirectory` is not empty
+var scriptDirectory = '';
+function locateFile(path) {
+ if (Module['locateFile']) {
+ return Module['locateFile'](path, scriptDirectory);
+ }
+ return scriptDirectory + path;
+}
+
+// Hooks that are implemented differently in different runtime environments.
+var readAsync, readBinary;
+
+if (ENVIRONMENT_IS_NODE) {
+
+ // These modules will usually be used on Node.js. Load them eagerly to avoid
+ // the complexity of lazy-loading.
+ var fs = require('fs');
+ var nodePath = require('path');
+
+ // EXPORT_ES6 + ENVIRONMENT_IS_NODE always requires use of import.meta.url,
+ // since there's no way getting the current absolute path of the module when
+ // support for that is not available.
+ if (!import.meta.url.startsWith('data:')) {
+ scriptDirectory = nodePath.dirname(require('url').fileURLToPath(import.meta.url)) + '/';
+ }
+
+// include: node_shell_read.js
+readBinary = (filename) => {
+ // We need to re-wrap `file://` strings to URLs.
+ filename = isFileURI(filename) ? new URL(filename) : filename;
+ var ret = fs.readFileSync(filename);
+ return ret;
+};
+
+readAsync = async (filename, binary = true) => {
+ // See the comment in the `readBinary` function.
+ filename = isFileURI(filename) ? new URL(filename) : filename;
+ var ret = fs.readFileSync(filename, binary ? undefined : 'utf8');
+ return ret;
+};
+// end include: node_shell_read.js
+ if (!Module['thisProgram'] && process.argv.length > 1) {
+ thisProgram = process.argv[1].replace(/\\/g, '/');
+ }
+
+ arguments_ = process.argv.slice(2);
+
+ // MODULARIZE will export the module in the proper place outside, we don't need to export here
+
+ quit_ = (status, toThrow) => {
+ process.exitCode = status;
+ throw toThrow;
+ };
+
+} else
+
+// Note that this includes Node.js workers when relevant (pthreads is enabled).
+// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and
+// ENVIRONMENT_IS_NODE.
+if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ if (ENVIRONMENT_IS_WORKER) { // Check worker, not web, since window could be polyfilled
+ scriptDirectory = self.location.href;
+ } else if (typeof document != 'undefined' && document.currentScript) { // web
+ scriptDirectory = document.currentScript.src;
+ }
+ // When MODULARIZE, this JS may be executed later, after document.currentScript
+ // is gone, so we saved it, and we use it here instead of any other info.
+ if (_scriptName) {
+ scriptDirectory = _scriptName;
+ }
+ // blob urls look like blob:http://site.com/etc/etc and we cannot infer anything from them.
+ // otherwise, slice off the final part of the url to find the script directory.
+ // if scriptDirectory does not contain a slash, lastIndexOf will return -1,
+ // and scriptDirectory will correctly be replaced with an empty string.
+ // If scriptDirectory contains a query (starting with ?) or a fragment (starting with #),
+ // they are removed because they could contain a slash.
+ if (scriptDirectory.startsWith('blob:')) {
+ scriptDirectory = '';
+ } else {
+ scriptDirectory = scriptDirectory.substr(0, scriptDirectory.replace(/[?#].*/, '').lastIndexOf('/')+1);
+ }
+
+ {
+// include: web_or_worker_shell_read.js
+if (ENVIRONMENT_IS_WORKER) {
+ readBinary = (url) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ xhr.responseType = 'arraybuffer';
+ xhr.send(null);
+ return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response));
+ };
+ }
+
+ readAsync = async (url) => {
+ // Fetch has some additional restrictions over XHR, like it can't be used on a file:// url.
+ // See https://github.com/github/fetch/pull/92#issuecomment-140665932
+ // Cordova or Electron apps are typically loaded from a file:// url.
+ // So use XHR on webview if URL is a file URL.
+ if (isFileURI(url)) {
+ return new Promise((resolve, reject) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.responseType = 'arraybuffer';
+ xhr.onload = () => {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0
+ resolve(xhr.response);
+ return;
+ }
+ reject(xhr.status);
+ };
+ xhr.onerror = reject;
+ xhr.send(null);
+ });
+ }
+ var response = await fetch(url, { credentials: 'same-origin' });
+ if (response.ok) {
+ return response.arrayBuffer();
+ }
+ throw new Error(response.status + ' : ' + response.url);
+ };
+// end include: web_or_worker_shell_read.js
+ }
+} else
+{
+}
+
+var out = Module['print'] || console.log.bind(console);
+var err = Module['printErr'] || console.error.bind(console);
+
+// Merge back in the overrides
+Object.assign(Module, moduleOverrides);
+// Free the object hierarchy contained in the overrides, this lets the GC
+// reclaim data used.
+moduleOverrides = null;
+
+// Emit code to handle expected values on the Module object. This applies Module.x
+// to the proper local x. This has two benefits: first, we only emit it if it is
+// expected to arrive, and second, by using a local everywhere else that can be
+// minified.
+
+if (Module['arguments']) arguments_ = Module['arguments'];
+
+if (Module['thisProgram']) thisProgram = Module['thisProgram'];
+
+// perform assertions in shell.js after we set up out() and err(), as otherwise if an assertion fails it cannot print the message
+// end include: shell.js
+
+// include: preamble.js
+// === Preamble library stuff ===
+
+// Documentation for the public APIs defined in this file must be updated in:
+// site/source/docs/api_reference/preamble.js.rst
+// A prebuilt local version of the documentation is available at:
+// site/build/text/docs/api_reference/preamble.js.txt
+// You can also build docs locally as HTML or other formats in site/
+// An online HTML version (which may be of a different version of Emscripten)
+// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html
+
+var wasmBinary = Module['wasmBinary'];
+
+// Wasm globals
+
+var wasmMemory;
+
+//========================================
+// Runtime essentials
+//========================================
+
+// whether we are quitting the application. no code should run after this.
+// set in exit() and abort()
+var ABORT = false;
+
+// set by exit() and abort(). Passed to 'onExit' handler.
+// NOTE: This is also used as the process return code code in shell environments
+// but only when noExitRuntime is false.
+var EXITSTATUS;
+
+// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we
+// don't define it at all in release modes. This matches the behaviour of
+// MINIMAL_RUNTIME.
+// TODO(sbc): Make this the default even without STRICT enabled.
+/** @type {function(*, string=)} */
+function assert(condition, text) {
+ if (!condition) {
+ // This build was created without ASSERTIONS defined. `assert()` should not
+ // ever be called in this configuration but in case there are callers in
+ // the wild leave this simple abort() implementation here for now.
+ abort(text);
+ }
+}
+
+// Memory management
+
+var HEAP,
+/** @type {!Int8Array} */
+ HEAP8,
+/** @type {!Uint8Array} */
+ HEAPU8,
+/** @type {!Int16Array} */
+ HEAP16,
+/** @type {!Uint16Array} */
+ HEAPU16,
+/** @type {!Int32Array} */
+ HEAP32,
+/** @type {!Uint32Array} */
+ HEAPU32,
+/** @type {!Float32Array} */
+ HEAPF32,
+/* BigInt64Array type is not correctly defined in closure
+/** not-@type {!BigInt64Array} */
+ HEAP64,
+/* BigUint64Array type is not correctly defined in closure
+/** not-t@type {!BigUint64Array} */
+ HEAPU64,
+/** @type {!Float64Array} */
+ HEAPF64;
+
+var runtimeInitialized = false;
+
+// include: URIUtils.js
+// Prefix of data URIs emitted by SINGLE_FILE and related options.
+var dataURIPrefix = 'data:application/octet-stream;base64,';
+
+/**
+ * Indicates whether filename is a base64 data URI.
+ * @noinline
+ */
+var isDataURI = (filename) => filename.startsWith(dataURIPrefix);
+
+/**
+ * Indicates whether filename is delivered via file protocol (as opposed to http/https)
+ * @noinline
+ */
+var isFileURI = (filename) => filename.startsWith('file://');
+// end include: URIUtils.js
+// include: runtime_shared.js
+// include: runtime_stack_check.js
+// end include: runtime_stack_check.js
+// include: runtime_exceptions.js
+// end include: runtime_exceptions.js
+// include: runtime_debug.js
+// end include: runtime_debug.js
+// include: memoryprofiler.js
+// end include: memoryprofiler.js
+
+
+function updateMemoryViews() {
+ var b = wasmMemory.buffer;
+ Module['HEAP8'] = HEAP8 = new Int8Array(b);
+ Module['HEAP16'] = HEAP16 = new Int16Array(b);
+ Module['HEAPU8'] = HEAPU8 = new Uint8Array(b);
+ Module['HEAPU16'] = HEAPU16 = new Uint16Array(b);
+ Module['HEAP32'] = HEAP32 = new Int32Array(b);
+ Module['HEAPU32'] = HEAPU32 = new Uint32Array(b);
+ Module['HEAPF32'] = HEAPF32 = new Float32Array(b);
+ Module['HEAPF64'] = HEAPF64 = new Float64Array(b);
+ Module['HEAP64'] = HEAP64 = new BigInt64Array(b);
+ Module['HEAPU64'] = HEAPU64 = new BigUint64Array(b);
+}
+
+// end include: runtime_shared.js
+var __ATPRERUN__ = []; // functions called before the runtime is initialized
+var __ATINIT__ = []; // functions called during startup
+var __ATEXIT__ = []; // functions called during shutdown
+var __ATPOSTRUN__ = []; // functions called after the main() is called
+
+function preRun() {
+ if (Module['preRun']) {
+ if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']];
+ while (Module['preRun'].length) {
+ addOnPreRun(Module['preRun'].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+}
+
+function initRuntime() {
+ runtimeInitialized = true;
+
+
+if (!Module['noFSInit'] && !FS.initialized)
+ FS.init();
+FS.ignorePermissions = false;
+
+TTY.init();
+ callRuntimeCallbacks(__ATINIT__);
+}
+
+function postRun() {
+
+ if (Module['postRun']) {
+ if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']];
+ while (Module['postRun'].length) {
+ addOnPostRun(Module['postRun'].shift());
+ }
+ }
+
+ callRuntimeCallbacks(__ATPOSTRUN__);
+}
+
+function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+}
+
+function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+}
+
+function addOnExit(cb) {
+}
+
+function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+}
+
+// A counter of dependencies for calling run(). If we need to
+// do asynchronous work before running, increment this and
+// decrement it. Incrementing must happen in a place like
+// Module.preRun (used by emcc to add file preloading).
+// Note that you can add dependencies in preRun, even though
+// it happens right before run - run will be postponed until
+// the dependencies are met.
+var runDependencies = 0;
+var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled
+
+function getUniqueRunDependency(id) {
+ return id;
+}
+
+function addRunDependency(id) {
+ runDependencies++;
+
+ Module['monitorRunDependencies']?.(runDependencies);
+
+}
+
+function removeRunDependency(id) {
+ runDependencies--;
+
+ Module['monitorRunDependencies']?.(runDependencies);
+
+ if (runDependencies == 0) {
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback(); // can add another dependenciesFulfilled
+ }
+ }
+}
+
/**
 * Abort execution: notify the embedder via Module['onAbort'], log the reason,
 * mark the runtime as aborted, reject the module-ready promise, and throw a
 * WebAssembly.RuntimeError so the unwind looks like a wasm trap rather than a
 * catchable foreign JS exception.
 * @param {string|number=} what - reason for aborting
 */
function abort(what) {
  Module['onAbort']?.(what);

  what = 'Aborted(' + what + ')';
  // TODO(sbc): Should we remove printing and leave it up to whoever
  // catches the exception?
  err(what);

  ABORT = true;

  what += '. Build with -sASSERTIONS for more info.';

  // Use a wasm runtime error, because a JS error might be seen as a foreign
  // exception, which means we'd run destructors on it. We need the error to
  // simply make the program stop.
  // FIXME This approach does not work in Wasm EH because it currently does not assume
  // all RuntimeErrors are from traps; it decides whether a RuntimeError is from
  // a trap or not based on a hidden field within the object. So at the moment
  // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that
  // allows this in the wasm spec.

  // Suppress closure compiler warning here. Closure compiler's builtin extern
  // definition for WebAssembly.RuntimeError claims it takes no arguments even
  // though it can.
  // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed.
  /** @suppress {checkTypes} */
  // BUG FIX: previously this constructed
  //   new WebAssembly.RuntimeError(message, fileName, lineNumber)
  // but none of those identifiers exist in this scope, so every abort()
  // died with a ReferenceError before readyPromiseReject could run.
  // Pass the formatted reason, matching upstream emscripten.
  var e = new WebAssembly.RuntimeError(what);

  readyPromiseReject(e);
  // Throw the error whether or not MODULARIZE is set because abort is used
  // in code paths apart from instantiation where an exception is expected
  // to be thrown when abort is called.
  throw e;
}
+
// Resolved URL/path of the wasm binary; filled lazily by createWasm().
var wasmBinaryFile;
// Locate 'spark.wasm'. Honors a Module['locateFile'] hook when the embedder
// provides one (data: URIs are passed through untouched); otherwise resolves
// relative to this script via import.meta.url.
function findWasmBinary() {
  if (Module['locateFile']) {
    var f = 'spark.wasm';
    if (!isDataURI(f)) {
      return locateFile(f);
    }
    return f;
  }
  // Use bundler-friendly `new URL(..., import.meta.url)` pattern; works in browsers too.
  return new URL('spark.wasm', import.meta.url).href;
}
+
// Synchronous fallback for obtaining the wasm bytes: use the embedder-supplied
// wasmBinary (copied into a fresh Uint8Array) or a shell-provided readBinary.
// Throws a plain string when neither is available.
function getBinarySync(file) {
  if (file == wasmBinaryFile && wasmBinary) {
    return new Uint8Array(wasmBinary);
  }
  if (readBinary) {
    return readBinary(file);
  }
  throw 'both async and sync fetching of the wasm failed';
}
+
// Fetch the wasm binary as a Uint8Array, preferring async readAsync() and
// falling back to getBinarySync() on failure (or when wasmBinary is preset).
async function getWasmBinary(binaryFile) {
  // If we don't have the binary yet, load it asynchronously using readAsync.
  if (!wasmBinary
  ) {
    // Fetch the binary using readAsync
    try {
      var response = await readAsync(binaryFile);
      return new Uint8Array(response);
    } catch {
      // Fall back to getBinarySync below;
    }
  }

  // Otherwise, getBinarySync should be able to get it synchronously
  return getBinarySync(binaryFile);
}
+
// Instantiate the module from raw bytes (non-streaming path). Any failure is
// fatal: abort() tears down the runtime and rejects the ready promise.
async function instantiateArrayBuffer(binaryFile, imports) {
  try {
    var binary = await getWasmBinary(binaryFile);
    var instance = await WebAssembly.instantiate(binary, imports);
    return instance;
  } catch (reason) {
    err(`failed to asynchronously prepare wasm: ${reason}`);

    abort(reason);
  }
}
+
// Instantiate the wasm module, preferring WebAssembly.instantiateStreaming
// when the delivery method supports it, otherwise (or on streaming failure)
// falling back to ArrayBuffer instantiation.
async function instantiateAsync(binary, binaryFile, imports) {
  if (!binary &&
      typeof WebAssembly.instantiateStreaming == 'function' &&
      !isDataURI(binaryFile)
      // Don't use streaming for file:// delivered objects in a webview, fetch them synchronously.
      && !isFileURI(binaryFile)
      // Avoid instantiateStreaming() on Node.js environment for now, as while
      // Node.js v18.1.0 implements it, it does not have a full fetch()
      // implementation yet.
      //
      // Reference:
      //   https://github.com/emscripten-core/emscripten/pull/16917
      && !ENVIRONMENT_IS_NODE
     ) {
    try {
      // No await here: instantiateStreaming accepts a Promise<Response>.
      var response = fetch(binaryFile, { credentials: 'same-origin' });
      var instantiationResult = await WebAssembly.instantiateStreaming(response, imports);
      return instantiationResult;
    } catch (reason) {
      // We expect the most common failure cause to be a bad MIME type for the binary,
      // in which case falling back to ArrayBuffer instantiation should work.
      err(`wasm streaming compile failed: ${reason}`);
      err('falling back to ArrayBuffer instantiation');
      // fall back of instantiateArrayBuffer below
    };
  }
  return instantiateArrayBuffer(binaryFile, imports);
}
+
// Build the import object handed to WebAssembly.instantiate. Both the 'env'
// and WASI namespaces are served by the same JS import table.
function getWasmImports() {
  var imports = {
    'env': wasmImports,
    'wasi_snapshot_preview1': wasmImports,
  };
  return imports;
}
+
// Create the wasm instance.
// Receives the wasm imports, returns the exports.
async function createWasm() {
  // Load the wasm module and create an instance of using native support in the JS engine.
  // handle a generated wasm instance, receiving its exports and
  // performing other necessary setup
  /** @param {WebAssembly.Module=} module*/
  function receiveInstance(instance, module) {
    wasmExports = instance.exports;

    // The module exports its own linear memory and indirect-call table;
    // refresh the JS-side heap views over the new memory.
    wasmMemory = wasmExports['memory'];

    updateMemoryViews();

    wasmTable = wasmExports['__indirect_function_table'];

    // Run C/C++ static constructors during runtime initialization.
    addOnInit(wasmExports['__wasm_call_ctors']);

    removeRunDependency('wasm-instantiate');
    return wasmExports;
  }
  // wait for the pthread pool (if any)
  addRunDependency('wasm-instantiate');

  // Prefer streaming instantiation if available.
  function receiveInstantiationResult(result) {
    // 'result' is a ResultObject object which has both the module and instance.
    // receiveInstance() will swap in the exports (to Module.asm) so they can be called
    // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line.
    // When the regression is fixed, can restore the above PTHREADS-enabled path.
    return receiveInstance(result['instance']);
  }

  var info = getWasmImports();

  // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback
  // to manually instantiate the Wasm module themselves. This allows pages to
  // run the instantiation parallel to any other async startup actions they are
  // performing.
  // Also pthreads and wasm workers initialize the wasm instance through this
  // path.
  if (Module['instantiateWasm']) {
    try {
      return Module['instantiateWasm'](info, receiveInstance);
    } catch(e) {
      err(`Module.instantiateWasm callback failed with error: ${e}`);
      // If instantiation fails, reject the module ready promise.
      readyPromiseReject(e);
    }
  }

  wasmBinaryFile ??= findWasmBinary();

  try {
    var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info);
    var exports = receiveInstantiationResult(result);
    return exports;
  } catch (e) {
    // If instantiation fails, reject the module ready promise.
    readyPromiseReject(e);
    return Promise.reject(e);
  }
}
+
+// === Body ===
+// end include: preamble.js
+
+
+ class ExitStatus {
+ name = 'ExitStatus';
+ constructor(status) {
+ this.message = `Program terminated with exit(${status})`;
+ this.status = status;
+ }
+ }
+
  // Drain a runtime callback queue front-to-back (mutates `callbacks`).
  var callRuntimeCallbacks = (callbacks) => {
    while (callbacks.length > 0) {
      // Pass the module as the first argument.
      callbacks.shift()(Module);
    }
  };
+
+
  /**
   * Read a value of the given C type from the wasm heap.
   * @param {number} ptr - byte address in linear memory
   * @param {string} type - 'i1'|'i8'|'i16'|'i32'|'i64'|'float'|'double', or
   *   any string ending in '*' (treated as a 32-bit pointer). Note that
   *   'i64' reads return a BigInt (HEAP64 is a BigInt64Array view).
   */
  function getValue(ptr, type = 'i8') {
    if (type.endsWith('*')) type = '*';
    switch (type) {
      case 'i1': return HEAP8[ptr];
      case 'i8': return HEAP8[ptr];
      case 'i16': return HEAP16[((ptr)>>1)];
      case 'i32': return HEAP32[((ptr)>>2)];
      case 'i64': return HEAP64[((ptr)>>3)];
      case 'float': return HEAPF32[((ptr)>>2)];
      case 'double': return HEAPF64[((ptr)>>3)];
      case '*': return HEAPU32[((ptr)>>2)];
      default: abort(`invalid type for getValue: ${type}`);
    }
  }
+
+ var noExitRuntime = Module['noExitRuntime'] || true;
+
+
  /**
   * Write a value of the given C type into the wasm heap.
   * @param {number} ptr - byte address in linear memory
   * @param {number} value - for 'i64' the value is coerced via BigInt()
   * @param {string} type - same type strings as getValue(); any string
   *   ending in '*' is treated as a 32-bit pointer
   */
  function setValue(ptr, value, type = 'i8') {
    if (type.endsWith('*')) type = '*';
    switch (type) {
      case 'i1': HEAP8[ptr] = value; break;
      case 'i8': HEAP8[ptr] = value; break;
      case 'i16': HEAP16[((ptr)>>1)] = value; break;
      case 'i32': HEAP32[((ptr)>>2)] = value; break;
      case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break;
      case 'float': HEAPF32[((ptr)>>2)] = value; break;
      case 'double': HEAPF64[((ptr)>>3)] = value; break;
      case '*': HEAPU32[((ptr)>>2)] = value; break;
      default: abort(`invalid type for setValue: ${type}`);
    }
  }
+
  // Thin wrappers over the compiled C-stack intrinsics; used to make and
  // release temporary allocations on the wasm stack.
  var stackRestore = (val) => __emscripten_stack_restore(val);

  var stackSave = () => _emscripten_stack_get_current();
+
  // Shared TextDecoder for fast UTF-8 decoding (undefined in shells without
  // TextDecoder; the manual loop below is then used for all strings).
  var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined;

  /**
   * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
   * array that contains uint8 values, returns a copy of that string as a
   * Javascript String object.
   * heapOrArray is either a regular array, or a JavaScript typed array view.
   * @param {number=} idx
   * @param {number=} maxBytesToRead
   * @return {string}
   */
  var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead = NaN) => {
    var endIdx = idx + maxBytesToRead;
    var endPtr = idx;
    // TextDecoder needs to know the byte length in advance, it doesn't stop on
    // null terminator by itself. Also, use the length info to avoid running tiny
    // strings through TextDecoder, since .subarray() allocates garbage.
    // (As a tiny code save trick, compare endPtr against endIdx using a negation,
    // so that undefined/NaN means Infinity)
    while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr;

    if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) {
      return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr));
    }
    var str = '';
    // If building with TextDecoder, we have already computed the string length
    // above, so test loop end condition against that
    while (idx < endPtr) {
      // For UTF8 byte structure, see:
      // http://en.wikipedia.org/wiki/UTF-8#Description
      // https://www.ietf.org/rfc/rfc2279.txt
      // https://tools.ietf.org/html/rfc3629
      var u0 = heapOrArray[idx++];
      if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; }
      var u1 = heapOrArray[idx++] & 63;
      if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; }
      var u2 = heapOrArray[idx++] & 63;
      if ((u0 & 0xF0) == 0xE0) {
        u0 = ((u0 & 15) << 12) | (u1 << 6) | u2;
      } else {
        u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63);
      }

      if (u0 < 0x10000) {
        str += String.fromCharCode(u0);
      } else {
        // Code point outside the BMP: emit a UTF-16 surrogate pair.
        var ch = u0 - 0x10000;
        str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
      }
    }
    return str;
  };
+
  /**
   * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
   * emscripten HEAP, returns a copy of that string as a Javascript String object.
   * Returns '' for a null pointer.
   *
   * @param {number} ptr
   * @param {number=} maxBytesToRead - An optional length that specifies the
   *   maximum number of bytes to read. You can omit this parameter to scan the
   *   string until the first 0 byte. If maxBytesToRead is passed, and the string
   *   at [ptr, ptr+maxBytesToReadr[ contains a null byte in the middle, then the
   *   string will cut short at that byte index (i.e. maxBytesToRead will not
   *   produce a string of exact length [ptr, ptr+maxBytesToRead[) N.B. mixing
   *   frequent uses of UTF8ToString() with and without maxBytesToRead may throw
   *   JS JIT optimizations off, so it is worth to consider consistently using one
   * @return {string}
   */
  var UTF8ToString = (ptr, maxBytesToRead) => {
    return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead) : '';
  };
  // Called by compiled code when a C assert() fires; formats the failing
  // condition and source location, then aborts the runtime.
  var ___assert_fail = (condition, filename, line, func) =>
    abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + [filename ? UTF8ToString(filename) : 'unknown filename', line, func ? UTF8ToString(func) : 'unknown function']);
+
  // Stack of ExceptionInfo objects for C++ exceptions currently inside a
  // catch block (innermost last).
  var exceptionCaught = [];



  // Count of thrown-but-not-yet-caught C++ exceptions (std::uncaught_exceptions).
  var uncaughtExceptionCount = 0;
  // __cxa_begin_catch: mark the exception caught, keep a strong reference for
  // the duration of the catch block, and return the adjusted object pointer.
  var ___cxa_begin_catch = (ptr) => {
    var info = new ExceptionInfo(ptr);
    if (!info.get_caught()) {
      info.set_caught(true);
      uncaughtExceptionCount--;
    }
    info.set_rethrown(false);
    exceptionCaught.push(info);
    // Refcounting is implemented by the compiled libc++abi helpers.
    ___cxa_increment_exception_refcount(ptr);
    return ___cxa_get_exception_ptr(ptr);
  };
+
+
  // Pointer of the most recently thrown, still-propagating exception (0 = none).
  var exceptionLast = 0;


  // __cxa_end_catch: leave the innermost catch block, dropping the reference
  // taken by __cxa_begin_catch.
  var ___cxa_end_catch = () => {
    // Clear state flag.
    _setThrew(0, 0);
    // Call destructor if one is registered then clear it.
    var info = exceptionCaught.pop();

    ___cxa_decrement_exception_refcount(info.excPtr);
    exceptionLast = 0; // XXX in decRef?
  };
+
+
  // JS accessor over the native exception header that libc++abi places
  // immediately BEFORE the thrown object in memory (hence ptr = excPtr - 24).
  // The byte offsets used below (4: type, 8: destructor, 12: caught flag,
  // 13: rethrown flag, 16: adjusted pointer) must match the compiled
  // __cxa_exception layout — do not change them independently.
  class ExceptionInfo {
    // excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it.
    constructor(excPtr) {
      this.excPtr = excPtr;
      this.ptr = excPtr - 24;
    }

    set_type(type) {
      HEAPU32[(((this.ptr)+(4))>>2)] = type;
    }

    get_type() {
      return HEAPU32[(((this.ptr)+(4))>>2)];
    }

    set_destructor(destructor) {
      HEAPU32[(((this.ptr)+(8))>>2)] = destructor;
    }

    get_destructor() {
      return HEAPU32[(((this.ptr)+(8))>>2)];
    }

    set_caught(caught) {
      // stored as a single byte flag
      caught = caught ? 1 : 0;
      HEAP8[(this.ptr)+(12)] = caught;
    }

    get_caught() {
      return HEAP8[(this.ptr)+(12)] != 0;
    }

    set_rethrown(rethrown) {
      rethrown = rethrown ? 1 : 0;
      HEAP8[(this.ptr)+(13)] = rethrown;
    }

    get_rethrown() {
      return HEAP8[(this.ptr)+(13)] != 0;
    }

    // Initialize native structure fields. Should be called once after allocated.
    init(type, destructor) {
      this.set_adjusted_ptr(0);
      this.set_type(type);
      this.set_destructor(destructor);
    }

    set_adjusted_ptr(adjustedPtr) {
      HEAPU32[(((this.ptr)+(16))>>2)] = adjustedPtr;
    }

    get_adjusted_ptr() {
      return HEAPU32[(((this.ptr)+(16))>>2)];
    }
  }
+
  // Continue propagating exception `ptr` after a cleanup/landing pad; records
  // it as the active exception if none is set, then rethrows into JS.
  var ___resumeException = (ptr) => {
    if (!exceptionLast) {
      exceptionLast = ptr;
    }
    throw exceptionLast;
  };
+
+
  // Store a secondary return value for compiled code (tempret register).
  var setTempRet0 = (val) => __emscripten_tempret_set(val);
  // Decide which catch clause (if any) can handle the currently propagating
  // exception. Returns the thrown pointer and stashes the matched catch type
  // in tempret0 (0 when there is nothing to match).
  var findMatchingCatch = (args) => {
    var thrown =
      exceptionLast;
    if (!thrown) {
      // just pass through the null ptr
      setTempRet0(0);
      return 0;
    }
    var info = new ExceptionInfo(thrown);
    info.set_adjusted_ptr(thrown);
    var thrownType = info.get_type();
    if (!thrownType) {
      // just pass through the thrown ptr
      setTempRet0(0);
      return thrown;
    }

    // can_catch receives a **, add indirection
    // The different catch blocks are denoted by different types.
    // Due to inheritance, those types may not precisely match the
    // type of the thrown object. Find one which matches, and
    // return the type of the catch block which should be called.
    for (var caughtType of args) {
      if (caughtType === 0 || caughtType === thrownType) {
        // Catch all clause matched or exactly the same type is caught
        break;
      }
      var adjusted_ptr_addr = info.ptr + 16;
      if (___cxa_can_catch(caughtType, thrownType, adjusted_ptr_addr)) {
        setTempRet0(caughtType);
        return thrown;
      }
    }
    setTempRet0(thrownType);
    return thrown;
  };
  // Entry points generated for catch clauses with 0 or 1 candidate type.
  var ___cxa_find_matching_catch_2 = () => findMatchingCatch([]);

  var ___cxa_find_matching_catch_3 = (arg0) => findMatchingCatch([arg0]);
+
+
+
  // __cxa_rethrow: re-throw the exception currently being handled by the
  // innermost catch block, flipping it back to "uncaught".
  var ___cxa_rethrow = () => {
    var info = exceptionCaught.pop();
    if (!info) {
      abort('no exception to throw');
    }
    var ptr = info.excPtr;
    if (!info.get_rethrown()) {
      // Only pop if the corresponding push was through rethrow_primary_exception
      exceptionCaught.push(info);
      info.set_rethrown(true);
      info.set_caught(false);
      uncaughtExceptionCount++;
    }
    exceptionLast = ptr;
    throw exceptionLast;
  };
+
+
+
  // __cxa_throw: begin propagating a freshly allocated C++ exception. The JS
  // exception thrown here is the heap pointer itself; compiled landing pads
  // recover it via findMatchingCatch().
  var ___cxa_throw = (ptr, type, destructor) => {
    var info = new ExceptionInfo(ptr);
    // Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception.
    info.init(type, destructor);
    exceptionLast = ptr;
    uncaughtExceptionCount++;
    throw exceptionLast;
  };

  // Backs std::uncaught_exceptions().
  var ___cxa_uncaught_exceptions = () => uncaughtExceptionCount;
+
+
  /** @suppress {duplicate } */
  // Read the next 32-bit integer from the current syscall's vararg area and
  // advance the cursor.
  var syscallGetVarargI = () => {
    // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number.
    var ret = HEAP32[((+SYSCALLS.varargs)>>2)];
    SYSCALLS.varargs += 4;
    return ret;
  };
  // Pointers are 32 bits in this build, so reuse the integer reader.
  var syscallGetVarargP = syscallGetVarargI;
+
+
+ var PATH = {
+ isAbs:(path) => path.charAt(0) === '/',
+ splitPath:(filename) => {
+ var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },
+ normalizeArray:(parts, allowAboveRoot) => {
+ // if the path tries to go above the root, `up` ends up > 0
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === '.') {
+ parts.splice(i, 1);
+ } else if (last === '..') {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ // if the path is allowed to go above the root, restore leading ..s
+ if (allowAboveRoot) {
+ for (; up; up--) {
+ parts.unshift('..');
+ }
+ }
+ return parts;
+ },
+ normalize:(path) => {
+ var isAbsolute = PATH.isAbs(path),
+ trailingSlash = path.substr(-1) === '/';
+ // Normalize the path
+ path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/');
+ if (!path && !isAbsolute) {
+ path = '.';
+ }
+ if (path && trailingSlash) {
+ path += '/';
+ }
+ return (isAbsolute ? '/' : '') + path;
+ },
+ dirname:(path) => {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ // No dirname whatsoever
+ return '.';
+ }
+ if (dir) {
+ // It has a dirname, strip trailing slash
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },
+ basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1],
+ join:(...paths) => PATH.normalize(paths.join('/')),
+ join2:(l, r) => PATH.normalize(l + '/' + r),
+ };
+
  // Pick a cryptographically secure random-fill implementation for the
  // current platform (Node's crypto module vs the Web Crypto API).
  var initRandomFill = () => {
    // This block is not needed on v19+ since crypto.getRandomValues is builtin
    if (ENVIRONMENT_IS_NODE) {
      var nodeCrypto = require('crypto');
      return (view) => nodeCrypto.randomFillSync(view);
    }

    return (view) => crypto.getRandomValues(view);
  };
  // Fill `view` with random bytes; replaces itself with the platform
  // implementation on first use.
  var randomFill = (view) => {
    // Lazily init on the first invocation.
    (randomFill = initRandomFill())(view);
  };
+
+
+
  // Path helpers that need filesystem context (the current working directory
  // comes from FS.cwd()); mirrors node's path.resolve/path.relative.
  var PATH_FS = {
    resolve:(...args) => {
      var resolvedPath = '',
        resolvedAbsolute = false;
      // Walk right-to-left, prepending segments until an absolute path is
      // found; i === -1 falls back to the FS current working directory.
      for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) {
        var path = (i >= 0) ? args[i] : FS.cwd();
        // Skip empty and invalid entries
        if (typeof path != 'string') {
          throw new TypeError('Arguments to path.resolve must be strings');
        } else if (!path) {
          return ''; // an invalid portion invalidates the whole thing
        }
        resolvedPath = path + '/' + resolvedPath;
        resolvedAbsolute = PATH.isAbs(path);
      }
      // At this point the path should be resolved to a full absolute path, but
      // handle relative paths to be safe (might happen when process.cwd() fails)
      resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/');
      return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
    },
    relative:(from, to) => {
      // Both sides are resolved to absolute paths first; substr(1) drops the
      // leading '/'.
      from = PATH_FS.resolve(from).substr(1);
      to = PATH_FS.resolve(to).substr(1);
      // Strip empty segments from both ends of the array.
      function trim(arr) {
        var start = 0;
        for (; start < arr.length; start++) {
          if (arr[start] !== '') break;
        }
        var end = arr.length - 1;
        for (; end >= 0; end--) {
          if (arr[end] !== '') break;
        }
        if (start > end) return [];
        // NOTE(review): the second slice argument reads like a length but is
        // an end index; inherited from node's historical lib/path.js. Harmless
        // here because resolve() strips empty segments so start is 0 — verify
        // against upstream before "fixing".
        return arr.slice(start, end - start + 1);
      }
      var fromParts = trim(from.split('/'));
      var toParts = trim(to.split('/'));
      var length = Math.min(fromParts.length, toParts.length);
      var samePartsLength = length;
      for (var i = 0; i < length; i++) {
        if (fromParts[i] !== toParts[i]) {
          samePartsLength = i;
          break;
        }
      }
      // Climb out of the unshared suffix of `from`, then descend into `to`.
      var outputParts = [];
      for (var i = samePartsLength; i < fromParts.length; i++) {
        outputParts.push('..');
      }
      outputParts = outputParts.concat(toParts.slice(samePartsLength));
      return outputParts.join('/');
    },
  };
+
+
+
+ var FS_stdin_getChar_buffer = [];
+
+ var lengthBytesUTF8 = (str) => {
+ var len = 0;
+ for (var i = 0; i < str.length; ++i) {
+ // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
+ // unit, not a Unicode code point of the character! So decode
+ // UTF16->UTF32->UTF8.
+ // See http://unicode.org/faq/utf_bom.html#utf16-3
+ var c = str.charCodeAt(i); // possibly a lead surrogate
+ if (c <= 0x7F) {
+ len++;
+ } else if (c <= 0x7FF) {
+ len += 2;
+ } else if (c >= 0xD800 && c <= 0xDFFF) {
+ len += 4; ++i;
+ } else {
+ len += 3;
+ }
+ }
+ return len;
+ };
+
  // Encode `str` as UTF-8 into `heap` starting at `outIdx`, writing at most
  // maxBytesToWrite bytes including a null terminator. Never splits a
  // multi-byte sequence. Returns the number of bytes written, excluding the
  // null terminator.
  var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
    // Parameter maxBytesToWrite is not optional. Negative values, 0, null,
    // undefined and false each don't write out any bytes.
    if (!(maxBytesToWrite > 0))
      return 0;

    var startIdx = outIdx;
    var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator.
    for (var i = 0; i < str.length; ++i) {
      // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
      // unit, not a Unicode code point of the character! So decode
      // UTF16->UTF32->UTF8.
      // See http://unicode.org/faq/utf_bom.html#utf16-3
      // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description
      // and https://www.ietf.org/rfc/rfc2279.txt
      // and https://tools.ietf.org/html/rfc3629
      var u = str.charCodeAt(i); // possibly a lead surrogate
      if (u >= 0xD800 && u <= 0xDFFF) {
        // combine the surrogate pair into a single code point
        var u1 = str.charCodeAt(++i);
        u = 0x10000 + ((u & 0x3FF) << 10) | (u1 & 0x3FF);
      }
      if (u <= 0x7F) {
        if (outIdx >= endIdx) break;
        heap[outIdx++] = u;
      } else if (u <= 0x7FF) {
        if (outIdx + 1 >= endIdx) break;
        heap[outIdx++] = 0xC0 | (u >> 6);
        heap[outIdx++] = 0x80 | (u & 63);
      } else if (u <= 0xFFFF) {
        if (outIdx + 2 >= endIdx) break;
        heap[outIdx++] = 0xE0 | (u >> 12);
        heap[outIdx++] = 0x80 | ((u >> 6) & 63);
        heap[outIdx++] = 0x80 | (u & 63);
      } else {
        if (outIdx + 3 >= endIdx) break;
        heap[outIdx++] = 0xF0 | (u >> 18);
        heap[outIdx++] = 0x80 | ((u >> 12) & 63);
        heap[outIdx++] = 0x80 | ((u >> 6) & 63);
        heap[outIdx++] = 0x80 | (u & 63);
      }
    }
    // Null-terminate the pointer to the buffer.
    heap[outIdx] = 0;
    return outIdx - startIdx;
  };
  /** @type {function(string, boolean=, number=)} */
  // Convert a JS string to a UTF-8 byte array. Unless dontAddNull is set, the
  // result keeps a trailing 0 slot; an explicit `length` fixes the array size.
  function intArrayFromString(stringy, dontAddNull, length) {
    var len = length > 0 ? length : lengthBytesUTF8(stringy)+1;
    var u8array = new Array(len);
    var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length);
    if (dontAddNull) u8array.length = numBytesWritten;
    return u8array;
  }
  // Return one byte of stdin, refilling the buffer from the platform source
  // when it runs dry (Node: synchronous read of fd 0; browser: window.prompt).
  // Returns null when no input is available.
  var FS_stdin_getChar = () => {
    if (!FS_stdin_getChar_buffer.length) {
      var result = null;
      if (ENVIRONMENT_IS_NODE) {
        // we will read data by chunks of BUFSIZE
        var BUFSIZE = 256;
        var buf = Buffer.alloc(BUFSIZE);
        var bytesRead = 0;

        // For some reason we must suppress a closure warning here, even though
        // fd definitely exists on process.stdin, and is even the proper way to
        // get the fd of stdin,
        // https://github.com/nodejs/help/issues/2136#issuecomment-523649904
        // This started to happen after moving this logic out of library_tty.js,
        // so it is related to the surrounding code in some unclear manner.
        /** @suppress {missingProperties} */
        var fd = process.stdin.fd;

        try {
          bytesRead = fs.readSync(fd, buf, 0, BUFSIZE);
        } catch(e) {
          // Cross-platform differences: on Windows, reading EOF throws an
          // exception, but on other OSes, reading EOF returns 0. Uniformize
          // behavior by treating the EOF exception to return 0.
          if (e.toString().includes('EOF')) bytesRead = 0;
          else throw e;
        }

        if (bytesRead > 0) {
          result = buf.slice(0, bytesRead).toString('utf-8');
        }
      } else
      if (typeof window != 'undefined' &&
        typeof window.prompt == 'function') {
        // Browser.
        result = window.prompt('Input: ');  // returns null on cancel
        if (result !== null) {
          result += '\n';
        }
      } else
      {}
      if (!result) {
        return null;
      }
      // true => no trailing null byte in the buffered array
      FS_stdin_getChar_buffer = intArrayFromString(result, true);
    }
    return FS_stdin_getChar_buffer.shift();
  };
  // Terminal (TTY) device driver for the FS layer: buffers output per line
  // and flushes via out()/err(); stdin is fed by FS_stdin_getChar().
  var TTY = {
  ttys:[],
  init() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // currently, FS.init does not distinguish if process.stdin is a file or TTY
    //   // device, it always assumes it's a TTY device. because of this, we're forcing
    //   // process.stdin to UTF8 encoding to at least make stdin reading compatible
    //   // with text files until FS.init can be refactored.
    //   process.stdin.setEncoding('utf8');
    // }
  },
  shutdown() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
    //   // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
    //   // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
    //   // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
    //   // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
    //   process.stdin.pause();
    // }
  },
  // Register a TTY device number with its per-device ops table.
  register(dev, ops) {
    TTY.ttys[dev] = { input: [], output: [], ops: ops };
    FS.registerDevice(dev, TTY.stream_ops);
  },
  // FS stream operations shared by all TTY devices; the per-device behavior
  // lives in stream.tty.ops (default_tty_ops / default_tty1_ops below).
  stream_ops:{
  open(stream) {
    var tty = TTY.ttys[stream.node.rdev];
    if (!tty) {
      throw new FS.ErrnoError(43);
    }
    stream.tty = tty;
    stream.seekable = false;
  },
  close(stream) {
    // flush any pending line data
    stream.tty.ops.fsync(stream.tty);
  },
  fsync(stream) {
    stream.tty.ops.fsync(stream.tty);
  },
  // Read up to `length` bytes into `buffer` at `offset`. Throws ErrnoError(6)
  // when no data at all is available; otherwise returns the bytes read so far.
  read(stream, buffer, offset, length, pos /* ignored */) {
    if (!stream.tty || !stream.tty.ops.get_char) {
      throw new FS.ErrnoError(60);
    }
    var bytesRead = 0;
    for (var i = 0; i < length; i++) {
      var result;
      try {
        result = stream.tty.ops.get_char(stream.tty);
      } catch (e) {
        throw new FS.ErrnoError(29);
      }
      if (result === undefined && bytesRead === 0) {
        throw new FS.ErrnoError(6);
      }
      if (result === null || result === undefined) break;
      bytesRead++;
      buffer[offset+i] = result;
    }
    if (bytesRead) {
      stream.node.atime = Date.now();
    }
    return bytesRead;
  },
  // Write `length` bytes from `buffer`; returns the count written. Note the
  // final `return i` relies on `var` hoisting `i` out of the try block.
  write(stream, buffer, offset, length, pos) {
    if (!stream.tty || !stream.tty.ops.put_char) {
      throw new FS.ErrnoError(60);
    }
    try {
      for (var i = 0; i < length; i++) {
        stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
      }
    } catch (e) {
      throw new FS.ErrnoError(29);
    }
    if (length) {
      stream.node.mtime = stream.node.ctime = Date.now();
    }
    return i;
  },
  },
  // Default stdout-style device: line-buffered, flushed through out().
  default_tty_ops:{
  get_char(tty) {
    return FS_stdin_getChar();
  },
  put_char(tty, val) {
    if (val === null || val === 10) {
      out(UTF8ArrayToString(tty.output));
      tty.output = [];
    } else {
      if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle.
    }
  },
  fsync(tty) {
    if (tty.output && tty.output.length > 0) {
      out(UTF8ArrayToString(tty.output));
      tty.output = [];
    }
  },
  // Fixed termios settings reported to compiled code via ioctl.
  ioctl_tcgets(tty) {
    // typical setting
    return {
      c_iflag: 25856,
      c_oflag: 5,
      c_cflag: 191,
      c_lflag: 35387,
      c_cc: [
        0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00,
        0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      ]
    };
  },
  ioctl_tcsets(tty, optional_actions, data) {
    // currently just ignore
    return 0;
  },
  // Fixed 24x80 window size.
  ioctl_tiocgwinsz(tty) {
    return [24, 80];
  },
  },
  // Default stderr-style device: same line buffering, flushed through err().
  default_tty1_ops:{
  put_char(tty, val) {
    if (val === null || val === 10) {
      err(UTF8ArrayToString(tty.output));
      tty.output = [];
    } else {
      if (val != 0) tty.output.push(val);
    }
  },
  fsync(tty) {
    if (tty.output && tty.output.length > 0) {
      err(UTF8ArrayToString(tty.output));
      tty.output = [];
    }
  },
  },
  };
+
+
  // Zero the heap bytes in [address, address + size).
  var zeroMemory = (address, size) => {
    HEAPU8.fill(0, address, address + size);
  };
+
+ var alignMemory = (size, alignment) => {
+ return Math.ceil(size / alignment) * alignment;
+ };
  // Allocate wasm-page-aligned (64 KiB) memory for mmap, zero-filled to match
  // anonymous-mapping semantics. Returns 0 on allocation failure.
  var mmapAlloc = (size) => {
    size = alignMemory(size, 65536);
    var ptr = _emscripten_builtin_memalign(65536, size);
    if (ptr) zeroMemory(ptr, size);
    return ptr;
  };
+ var MEMFS = {
+ ops_table:null,
+ mount(mount) {
+ return MEMFS.createNode(null, '/', 16895, 0);
+ },
+ createNode(parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ // no supported
+ throw new FS.ErrnoError(63);
+ }
+ MEMFS.ops_table ||= {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek
+ }
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap,
+ msync: MEMFS.stream_ops.msync
+ }
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink
+ },
+ stream: {}
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr
+ },
+ stream: FS.chrdev_stream_ops
+ }
+ };
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity.
+ // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred
+ // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size
+ // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity vs used -scheme.
+ node.contents = null;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.atime = node.mtime = node.ctime = Date.now();
+ // add the new node to the parent
+ if (parent) {
+ parent.contents[name] = node;
+ parent.atime = parent.mtime = parent.ctime = node.atime;
+ }
+ return node;
+ },
+ getFileDataAsTypedArray(node) {
+ if (!node.contents) return new Uint8Array(0);
+ if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes.
+ return new Uint8Array(node.contents);
+ },
+ expandFileStorage(node, newCapacity) {
+ var prevCapacity = node.contents ? node.contents.length : 0;
+ if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough.
+ // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity.
+ // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to
+ // avoid overshooting the allocation cap by a very large margin.
+ var CAPACITY_DOUBLING_MAX = 1024 * 1024;
+ newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0);
+ if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding.
+ var oldContents = node.contents;
+ node.contents = new Uint8Array(newCapacity); // Allocate new storage.
+ if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage.
+ },
+ resizeFileStorage(node, newSize) {
+ if (node.usedBytes == newSize) return;
+ if (newSize == 0) {
+ node.contents = null; // Fully decommit when requesting a resize to zero.
+ node.usedBytes = 0;
+ } else {
+ var oldContents = node.contents;
+ node.contents = new Uint8Array(newSize); // Allocate new storage.
+ if (oldContents) {
+ node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage.
+ }
+ node.usedBytes = newSize;
+ }
+ },
+ // VFS node operations for the in-memory filesystem.
+ node_ops:{
+ // Build a stat(2)-like attribute object for the node.
+ getattr(node) {
+ var attr = {};
+ // device numbers reuse inode numbers.
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.usedBytes;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.atime);
+ attr.mtime = new Date(node.mtime);
+ attr.ctime = new Date(node.ctime);
+ // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
+ // but this is not required by the standard.
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },
+ // Apply attribute changes. Only mode/atime/mtime/ctime and size are
+ // honored here; any other key on `attr` (e.g. `timestamp`, uid/gid) is
+ // silently ignored by this backend.
+ setattr(node, attr) {
+ for (const key of ["mode", "atime", "mtime", "ctime"]) {
+ if (attr[key] != null) {
+ node[key] = attr[key];
+ }
+ }
+ if (attr.size !== undefined) {
+ MEMFS.resizeFileStorage(node, attr.size);
+ }
+ },
+ // MEMFS keeps every live node in FS's name hash table, so a VFS-level
+ // lookup reaching this backend means the entry does not exist.
+ lookup(parent, name) {
+ throw MEMFS.doesNotExistError;
+ },
+ mknod(parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },
+ // Move/rename within MEMFS; overwriting a non-empty directory fails with
+ // errno 55 (ENOTEMPTY).
+ rename(old_node, new_dir, new_name) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {}
+ if (new_node) {
+ if (FS.isDir(old_node.mode)) {
+ // if we're overwriting a directory at new_name, make sure it's empty.
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(55);
+ }
+ }
+ FS.hashRemoveNode(new_node);
+ }
+ // do the internal rewiring
+ delete old_node.parent.contents[old_node.name];
+ new_dir.contents[new_name] = old_node;
+ old_node.name = new_name;
+ new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now();
+ },
+ unlink(parent, name) {
+ delete parent.contents[name];
+ parent.ctime = parent.mtime = Date.now();
+ },
+ // Remove a directory entry; errno 55 (ENOTEMPTY) if it still has children.
+ rmdir(parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(55);
+ }
+ delete parent.contents[name];
+ parent.ctime = parent.mtime = Date.now();
+ },
+ readdir(node) {
+ return ['.', '..', ...Object.keys(node.contents)];
+ },
+ // Create a symlink node (40960 = S_IFLNK) storing the target path string.
+ symlink(parent, newname, oldpath) {
+ var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0);
+ node.link = oldpath;
+ return node;
+ },
+ readlink(node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(28);
+ }
+ return node.link;
+ },
+ },
+ // Stream (open-file) operations for the in-memory filesystem.
+ stream_ops:{
+ // Copy up to `length` bytes from the file at `position` into `buffer`
+ // starting at `offset`; returns the number of bytes actually read.
+ read(stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= stream.node.usedBytes) return 0;
+ var size = Math.min(stream.node.usedBytes - position, length);
+ if (size > 8 && contents.subarray) { // non-trivial, and typed array
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else {
+ for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
+ }
+ return size;
+ },
+ // Write `length` bytes from `buffer` into the file at `position`.
+ // `canOwn` lets the file adopt the caller's buffer without copying, but is
+ // disabled when the buffer aliases the wasm heap (which may be re-grown).
+ write(stream, buffer, offset, length, position, canOwn) {
+ // If the buffer is located in main memory (HEAP), and if
+ // memory can grow, we can't hold on to references of the
+ // memory buffer, as they may get invalidated. That means we
+ // need to do copy its contents.
+ if (buffer.buffer === HEAP8.buffer) {
+ canOwn = false;
+ }
+
+ if (!length) return 0;
+ var node = stream.node;
+ node.mtime = node.ctime = Date.now();
+
+ if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array?
+ if (canOwn) {
+ node.contents = buffer.subarray(offset, offset + length);
+ node.usedBytes = length;
+ return length;
+ } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data.
+ node.contents = buffer.slice(offset, offset + length);
+ node.usedBytes = length;
+ return length;
+ } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file?
+ node.contents.set(buffer.subarray(offset, offset + length), position);
+ return length;
+ }
+ }
+
+ // Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
+ MEMFS.expandFileStorage(node, position+length);
+ if (node.contents.subarray && buffer.subarray) {
+ // Use typed array write which is available.
+ node.contents.set(buffer.subarray(offset, offset + length), position);
+ } else {
+ for (var i = 0; i < length; i++) {
+ node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not.
+ }
+ }
+ node.usedBytes = Math.max(node.usedBytes, position + length);
+ return length;
+ },
+ // lseek(2): whence 1 = SEEK_CUR (relative to current), 2 = SEEK_END
+ // (relative to file size); otherwise offset is absolute (SEEK_SET).
+ llseek(stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) {
+ position += stream.position;
+ } else if (whence === 2) {
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.usedBytes;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ return position;
+ },
+ // posix_fallocate-style: ensure [offset, offset+length) is backed.
+ allocate(stream, offset, length) {
+ MEMFS.expandFileStorage(stream.node, offset + length);
+ stream.node.usedBytes = Math.max(stream.node.usedBytes, offset + length);
+ },
+ // mmap: share the file's heap-backed storage directly when possible
+ // (MAP_SHARED and contents already on the wasm heap), otherwise allocate
+ // and copy the requested window. Returns { ptr, allocated }.
+ mmap(stream, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ // Only make a new copy when MAP_PRIVATE is specified.
+ if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) {
+ // We can't emulate MAP_SHARED when the file is not backed by the
+ // buffer we're mapping to (e.g. the HEAP buffer).
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ allocated = true;
+ ptr = mmapAlloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(48);
+ }
+ if (contents) {
+ // Try to avoid unnecessary slices.
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(contents, position, position + length);
+ }
+ }
+ HEAP8.set(contents, ptr);
+ }
+ }
+ return { ptr, allocated };
+ },
+ // msync: flush a mapped region back into the file via a plain write.
+ msync(stream, buffer, offset, length, mmapFlags) {
+ MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
+ // should we check if bytesWritten and length are the same?
+ return 0;
+ },
+ },
+ };
+
+ // Fetch a URL (via the runtime's readAsync) and return its bytes.
+ var asyncLoad = async (url) => {
+ var arrayBuffer = await readAsync(url);
+ return new Uint8Array(arrayBuffer);
+ };
+
+
+ // Thin wrapper kept for the preload pipeline below.
+ var FS_createDataFile = (parent, name, fileData, canRead, canWrite, canOwn) => {
+ FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn);
+ };
+
+ // Give the first registered preload plugin (e.g. image/audio decoders) a
+ // chance to consume the data; returns true if one claimed it.
+ var preloadPlugins = Module['preloadPlugins'] || [];
+ var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => {
+ // Ensure plugins are ready.
+ if (typeof Browser != 'undefined') Browser.init();
+
+ var handled = false;
+ preloadPlugins.forEach((plugin) => {
+ if (handled) return;
+ if (plugin['canHandle'](fullname)) {
+ plugin['handle'](byteArray, fullname, finish, onerror);
+ handled = true;
+ }
+ });
+ return handled;
+ };
+ // Asynchronously create a file from a URL or a byte array, holding a run
+ // dependency until the data (and any plugin processing) has finished.
+ var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => {
+ // TODO we should allow people to just pass in a complete filename instead
+ // of parent and name being that we just join them anyways
+ var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
+ var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname
+ function processData(byteArray) {
+ function finish(byteArray) {
+ preFinish?.();
+ if (!dontCreateFile) {
+ FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
+ }
+ onload?.();
+ removeRunDependency(dep);
+ }
+ if (FS_handledByPreloadPlugin(byteArray, fullname, finish, () => {
+ onerror?.();
+ removeRunDependency(dep);
+ })) {
+ return;
+ }
+ finish(byteArray);
+ }
+ addRunDependency(dep);
+ if (typeof url == 'string') {
+ asyncLoad(url).then(processData, onerror);
+ } else {
+ processData(url);
+ }
+ };
+
+ // Map an fopen-style mode string to POSIX open(2) flag bits
+ // (512=O_TRUNC, 64=O_CREAT, 1024=O_APPEND; low 2 bits = access mode).
+ var FS_modeStringToFlags = (str) => {
+ var flagModes = {
+ 'r': 0,
+ 'r+': 2,
+ 'w': 512 | 64 | 1,
+ 'w+': 512 | 64 | 2,
+ 'a': 1024 | 64 | 1,
+ 'a+': 1024 | 64 | 2,
+ };
+ var flags = flagModes[str];
+ if (typeof flags == 'undefined') {
+ throw new Error(`Unknown file open mode: ${str}`);
+ }
+ return flags;
+ };
+
+ // Build permission bits: read implies r+x for all (292|73 = 0o555),
+ // write adds w for all (146 = 0o222).
+ var FS_getMode = (canRead, canWrite) => {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ };
+
+
+
+ // The virtual filesystem layer. Backends (MEMFS etc.) plug in via mount().
+ var FS = {
+ root:null,
+ mounts:[],
+ devices:{
+ },
+ streams:[],
+ nextInode:1,
+ nameTable:null,
+ currentPath:"/",
+ initialized:false,
+ // Whether to ignore permissions. Disabled after the runtime is initialized.
+ ignorePermissions:true,
+ ErrnoError:class {
+ name = 'ErrnoError';
+ // We set the `name` property to be able to identify `FS.ErrnoError`
+ // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway.
+ // - when using PROXYFS, an error can come from an underlying FS
+ // as different FS objects have their own FS.ErrnoError each,
+ // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs.
+ // we'll use the reliable test `err.name == "ErrnoError"` instead
+ constructor(errno) {
+ this.errno = errno;
+ }
+ },
+ filesystems:null,
+ syncFSRequests:0,
+ readFiles:{
+ },
+ // An open file description. `shared` holds state (flags, position) that
+ // must be visible across dup()'ed streams.
+ FSStream:class {
+ shared = {};
+ get object() {
+ return this.node;
+ }
+ set object(val) {
+ this.node = val;
+ }
+ // Access-mode tests on the low bits of the open flags.
+ get isRead() {
+ return (this.flags & 2097155) !== 1;
+ }
+ get isWrite() {
+ return (this.flags & 2097155) !== 0;
+ }
+ get isAppend() {
+ return (this.flags & 1024);
+ }
+ get flags() {
+ return this.shared.flags;
+ }
+ set flags(val) {
+ this.shared.flags = val;
+ }
+ get position() {
+ return this.shared.position;
+ }
+ set position(val) {
+ this.shared.position = val;
+ }
+ },
+ // An inode. The root node is its own parent; timestamps start at creation.
+ FSNode:class {
+ node_ops = {};
+ stream_ops = {};
+ readMode = 292 | 73;
+ writeMode = 146;
+ mounted = null;
+ constructor(parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this; // root node sets parent to itself
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.rdev = rdev;
+ this.atime = this.mtime = this.ctime = Date.now();
+ }
+ get read() {
+ return (this.mode & this.readMode) === this.readMode;
+ }
+ set read(val) {
+ val ? this.mode |= this.readMode : this.mode &= ~this.readMode;
+ }
+ get write() {
+ return (this.mode & this.writeMode) === this.writeMode;
+ }
+ set write(val) {
+ val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode;
+ }
+ get isFolder() {
+ return FS.isDir(this.mode);
+ }
+ get isDevice() {
+ return FS.isChrdev(this.mode);
+ }
+ },
+ // Resolve `path` to { path, node }, walking components from the root,
+ // crossing mountpoints and following symlinks per opts:
+ //   opts.parent      - stop at the parent, don't resolve the last component
+ //   opts.follow      - follow a symlink in the final component
+ //   opts.follow_mount- descend into mounted fs at the final component (default true)
+ //   opts.noent_okay  - return { path } without node if the last component is missing
+ lookupPath(path, opts = {}) {
+ if (!path) {
+ throw new FS.ErrnoError(44);
+ }
+ opts.follow_mount ??= true
+
+ if (!PATH.isAbs(path)) {
+ path = FS.cwd() + '/' + path;
+ }
+
+ // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
+ linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) {
+ // split the absolute path
+ var parts = path.split('/').filter((p) => !!p);
+
+ // start at the root
+ var current = FS.root;
+ var current_path = '/';
+
+ for (var i = 0; i < parts.length; i++) {
+ var islast = (i === parts.length-1);
+ if (islast && opts.parent) {
+ // stop resolving
+ break;
+ }
+
+ if (parts[i] === '.') {
+ continue;
+ }
+
+ if (parts[i] === '..') {
+ current_path = PATH.dirname(current_path);
+ current = current.parent;
+ continue;
+ }
+
+ current_path = PATH.join2(current_path, parts[i]);
+ try {
+ current = FS.lookupNode(current, parts[i]);
+ } catch (e) {
+ // if noent_okay is true, suppress a ENOENT in the last component
+ // and return an object with an undefined node. This is needed for
+ // resolving symlinks in the path when creating a file.
+ if ((e?.errno === 44) && islast && opts.noent_okay) {
+ return { path: current_path };
+ }
+ throw e;
+ }
+
+ // jump to the mount's root node if this is a mountpoint
+ if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+
+ // by default, lookupPath will not follow a symlink if it is the final path component.
+ // setting opts.follow = true will override this behavior.
+ if (FS.isLink(current.mode) && (!islast || opts.follow)) {
+ if (!current.node_ops.readlink) {
+ throw new FS.ErrnoError(52);
+ }
+ var link = current.node_ops.readlink(current);
+ if (!PATH.isAbs(link)) {
+ link = PATH.dirname(current_path) + '/' + link;
+ }
+ path = link + '/' + parts.slice(i + 1).join('/');
+ continue linkloop;
+ }
+ }
+ return { path: current_path, node: current };
+ }
+ // Too many levels of symbolic links (errno 32, ELOOP).
+ throw new FS.ErrnoError(32);
+ },
+ // Reconstruct the absolute path of a node by walking parent links up to
+ // the root of its mount, then prefixing the mountpoint path.
+ getPath(node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path;
+ }
+ path = path ? `${node.name}/${path}` : node.name;
+ node = node.parent;
+ }
+ },
+ // --- (parent id, name) -> node hash table with chained buckets -----------
+ // Bucket index for a directory entry; chaining is via node.name_next.
+ hashName(parentid, name) {
+ var hash = 0;
+
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },
+ // Prepend the node to its bucket's chain.
+ hashAddNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },
+ // Unlink the node from its bucket's chain (no-op if absent).
+ hashRemoveNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },
+ // Resolve a single directory entry: consult the hash table first, then
+ // fall back to the backend's node_ops.lookup.
+ lookupNode(parent, name) {
+ var errCode = FS.mayLookup(parent);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ // if we failed to find it in the cache, call into the VFS
+ return FS.lookup(parent, name);
+ },
+ // Allocate a new inode and register it in the name table.
+ createNode(parent, name, mode, rdev) {
+ var node = new FS.FSNode(parent, name, mode, rdev);
+
+ FS.hashAddNode(node);
+
+ return node;
+ },
+ destroyNode(node) {
+ FS.hashRemoveNode(node);
+ },
+ isRoot(node) {
+ return node === node.parent;
+ },
+ isMountpoint(node) {
+ return !!node.mounted;
+ },
+ // File-type tests on the S_IFMT bits (61440 = 0o170000).
+ isFile(mode) {
+ return (mode & 61440) === 32768;
+ },
+ isDir(mode) {
+ return (mode & 61440) === 16384;
+ },
+ isLink(mode) {
+ return (mode & 61440) === 40960;
+ },
+ isChrdev(mode) {
+ return (mode & 61440) === 8192;
+ },
+ isBlkdev(mode) {
+ return (mode & 61440) === 24576;
+ },
+ isFIFO(mode) {
+ return (mode & 61440) === 4096;
+ },
+ isSocket(mode) {
+ return (mode & 49152) === 49152;
+ },
+ // Translate open(2) flag bits to an 'r'/'w'/'rw' (+ 'w' for O_TRUNC) string
+ // consumed by nodePermissions().
+ flagsToPermissionString(flag) {
+ var perms = ['r', 'w', 'rw'][flag & 3];
+ if ((flag & 512)) {
+ perms += 'w';
+ }
+ return perms;
+ },
+ // --- permission checks: each returns 0 on success or an errno code -------
+ // Check the requested 'r'/'w'/'x' perms against the node's mode bits;
+ // returns 2 (EACCES) on denial. Skipped while ignorePermissions is set.
+ nodePermissions(node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ // return 0 if any user, group or owner bits are set.
+ if (perms.includes('r') && !(node.mode & 292)) {
+ return 2;
+ } else if (perms.includes('w') && !(node.mode & 146)) {
+ return 2;
+ } else if (perms.includes('x') && !(node.mode & 73)) {
+ return 2;
+ }
+ return 0;
+ },
+ // May we resolve entries inside `dir`? 54 = ENOTDIR.
+ mayLookup(dir) {
+ if (!FS.isDir(dir.mode)) return 54;
+ var errCode = FS.nodePermissions(dir, 'x');
+ if (errCode) return errCode;
+ if (!dir.node_ops.lookup) return 2;
+ return 0;
+ },
+ // May we create `name` inside `dir`? 20 = EEXIST if it already exists.
+ mayCreate(dir, name) {
+ if (!FS.isDir(dir.mode)) {
+ return 54;
+ }
+ try {
+ var node = FS.lookupNode(dir, name);
+ return 20;
+ } catch (e) {
+ }
+ return FS.nodePermissions(dir, 'wx');
+ },
+ // May we delete `name` from `dir`? Guards type mismatches (54/31), the
+ // root and the cwd (10 = EBUSY).
+ mayDelete(dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var errCode = FS.nodePermissions(dir, 'wx');
+ if (errCode) {
+ return errCode;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return 54;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return 10;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return 31;
+ }
+ }
+ return 0;
+ },
+ // May we open the node with these flags? Symlinks here mean an unresolved
+ // link (32 = ELOOP); directories reject write/create/truncate (31 = EISDIR).
+ mayOpen(node, flags) {
+ if (!node) {
+ return 44;
+ }
+ if (FS.isLink(node.mode)) {
+ return 32;
+ } else if (FS.isDir(node.mode)) {
+ if (FS.flagsToPermissionString(flags) !== 'r' // opening for write
+ || (flags & (512 | 64))) { // TODO: check for O_SEARCH? (== search for dir only)
+ return 31;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },
+ // Return `op` or throw ErrnoError(err) if the backend doesn't provide it.
+ checkOpExists(op, err) {
+ if (!op) {
+ throw new FS.ErrnoError(err);
+ }
+ return op;
+ },
+ // --- file descriptor / stream / device management ------------------------
+ MAX_OPEN_FDS:4096,
+ // Lowest free fd; 33 = EMFILE when the table is full.
+ nextfd() {
+ for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(33);
+ },
+ // Like getStream but throws 8 (EBADF) for unknown fds.
+ getStreamChecked(fd) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(8);
+ }
+ return stream;
+ },
+ getStream:(fd) => FS.streams[fd],
+ // Register a stream object in the fd table (fd auto-assigned when -1).
+ createStream(stream, fd = -1) {
+
+ // clone it, so we can return an instance of FSStream
+ stream = Object.assign(new FS.FSStream(), stream);
+ if (fd == -1) {
+ fd = FS.nextfd();
+ }
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },
+ closeStream(fd) {
+ FS.streams[fd] = null;
+ },
+ // dup(2): clone the stream onto a (possibly specified) new fd, giving the
+ // backend a chance to react via its optional dup hook.
+ dupStream(origStream, fd = -1) {
+ var stream = FS.createStream(origStream, fd);
+ stream.stream_ops?.dup?.(stream);
+ return stream;
+ },
+ // Stream ops for character devices: open() swaps in the registered
+ // device's stream_ops; seeking a chrdev is 70 (ESPIPE).
+ chrdev_stream_ops:{
+ open(stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ // override node's stream ops with the device's
+ stream.stream_ops = device.stream_ops;
+ // forward the open call
+ stream.stream_ops.open?.(stream);
+ },
+ llseek() {
+ throw new FS.ErrnoError(70);
+ },
+ },
+ // dev_t packing: major in the high byte, minor in the low byte.
+ major:(dev) => ((dev) >> 8),
+ minor:(dev) => ((dev) & 0xff),
+ makedev:(ma, mi) => ((ma) << 8 | (mi)),
+ registerDevice(dev, ops) {
+ FS.devices[dev] = { stream_ops: ops };
+ },
+ getDevice:(dev) => FS.devices[dev],
+ // Collect `mount` and all of its transitive child mounts (iterative DFS).
+ getMounts(mount) {
+ var mounts = [];
+ var check = [mount];
+
+ while (check.length) {
+ var m = check.pop();
+
+ mounts.push(m);
+
+ check.push(...m.mounts);
+ }
+
+ return mounts;
+ },
+ // Synchronize every mounted filesystem that supports syncfs, then invoke
+ // callback(errCode|null) once; populate selects the sync direction.
+ // The legacy call form syncfs(callback) is also accepted.
+ syncfs(populate, callback) {
+ if (typeof populate == 'function') {
+ callback = populate;
+ populate = false;
+ }
+
+ FS.syncFSRequests++;
+
+ if (FS.syncFSRequests > 1) {
+ err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`);
+ }
+
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+
+ function doCallback(errCode) {
+ FS.syncFSRequests--;
+ return callback(errCode);
+ }
+
+ // Per-mount completion: first error wins and short-circuits the callback;
+ // otherwise fire once all mounts have reported in.
+ function done(errCode) {
+ if (errCode) {
+ if (!done.errored) {
+ done.errored = true;
+ return doCallback(errCode);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ doCallback(null);
+ }
+ };
+
+ // sync all mounts
+ mounts.forEach((mount) => {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },
+ // Mount a filesystem `type` at `mountpoint` ('/' for the root; a falsy
+ // mountpoint creates an unattached "pseudo" mount). Returns the new
+ // mount's root node. 10 = EBUSY when the target is already a mountpoint.
+ mount(type, opts, mountpoint) {
+ var root = mountpoint === '/';
+ var pseudo = !mountpoint;
+ var node;
+
+ if (root && FS.root) {
+ throw new FS.ErrnoError(10);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ mountpoint = lookup.path; // use the absolute path
+ node = lookup.node;
+
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ }
+
+ var mount = {
+ type,
+ opts,
+ mountpoint,
+ mounts: []
+ };
+
+ // create a root node for the fs
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ // set as a mountpoint
+ node.mounted = mount;
+
+ // add the new mount to the current mount's children
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+
+ return mountRoot;
+ },
+ // Detach the mount at `mountpoint`, purging every cached node that belongs
+ // to it or any of its child mounts from the name table.
+ unmount(mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, { follow_mount: false });
+
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(28);
+ }
+
+ // destroy the nodes for this mount, and all its child mounts
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+
+ Object.keys(FS.nameTable).forEach((hash) => {
+ var current = FS.nameTable[hash];
+
+ while (current) {
+ var next = current.name_next;
+
+ if (mounts.includes(current.mount)) {
+ FS.destroyNode(current);
+ }
+
+ current = next;
+ }
+ });
+
+ // no longer a mountpoint
+ node.mounted = null;
+
+ // remove this mount from the child mounts
+ var idx = node.mount.mounts.indexOf(mount);
+ node.mount.mounts.splice(idx, 1);
+ },
+ // Delegate a directory-entry lookup to the parent's backend.
+ lookup(parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },
+ // Create a filesystem node of any type at `path` via the parent backend's
+ // mknod. 20 = EEXIST for '.'/'..', 63 = EPERM when unsupported.
+ mknod(path, mode, dev) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ if (!name) {
+ throw new FS.ErrnoError(28);
+ }
+ if (name === '.' || name === '..') {
+ throw new FS.ErrnoError(20);
+ }
+ var errCode = FS.mayCreate(parent, name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },
+ statfs(path) {
+ return FS.statfsNode(FS.lookupPath(path, {follow: true}).node);
+ },
+ statfsStream(stream) {
+ // We keep a separate statfsStream function because noderawfs overrides
+ // it. In noderawfs, stream.node is sometimes null. Instead, we need to
+ // look at stream.path.
+ return FS.statfsNode(stream.node);
+ },
+ statfsNode(node) {
+ // NOTE: None of the defaults here are true. We're just returning safe and
+ // sane values. Currently nodefs and rawfs replace these defaults,
+ // other file systems leave them alone.
+ var rtn = {
+ bsize: 4096,
+ frsize: 4096,
+ blocks: 1e6,
+ bfree: 5e5,
+ bavail: 5e5,
+ files: FS.nextInode,
+ ffree: FS.nextInode - 1,
+ fsid: 42,
+ flags: 2,
+ namelen: 255,
+ };
+
+ if (node.node_ops.statfs) {
+ Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root));
+ }
+ return rtn;
+ },
+ // Create a regular file (mode is masked to permission bits + S_IFREG).
+ create(path, mode = 0o666) {
+ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },
+ // Create a directory (permission bits + S_IFDIR).
+ mkdir(path, mode = 0o777) {
+ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },
+ // mkdir -p: create each missing path component, tolerating EEXIST (20).
+ mkdirTree(path, mode) {
+ var dirs = path.split('/');
+ var d = '';
+ for (var i = 0; i < dirs.length; ++i) {
+ if (!dirs[i]) continue;
+ d += '/' + dirs[i];
+ try {
+ FS.mkdir(d, mode);
+ } catch(e) {
+ if (e.errno != 20) throw e;
+ }
+ }
+ },
+ // Create a character device node; mkdev(path, dev) defaults mode to 0o666.
+ mkdev(path, mode, dev) {
+ if (typeof dev == 'undefined') {
+ dev = mode;
+ mode = 0o666;
+ }
+ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },
+ // Create a symlink at newpath pointing to oldpath (target need not exist,
+ // but must resolve to a non-empty path string).
+ symlink(oldpath, newpath) {
+ if (!PATH_FS.resolve(oldpath)) {
+ throw new FS.ErrnoError(44);
+ }
+ var lookup = FS.lookupPath(newpath, { parent: true });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var newname = PATH.basename(newpath);
+ var errCode = FS.mayCreate(parent, newname);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },
+ // rename(2): validates both parents, same-mount constraint (75 = EXDEV),
+ // ancestry (a path may not be moved into itself), delete/create
+ // permissions and mountpoint protection before delegating to the backend.
+ rename(old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ // parents must exist
+ var lookup, old_dir, new_dir;
+
+ // let the errors from non existent directories percolate up
+ lookup = FS.lookupPath(old_path, { parent: true });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, { parent: true });
+ new_dir = lookup.node;
+
+ if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
+ // need to be part of the same mount
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(75);
+ }
+ // source must exist
+ var old_node = FS.lookupNode(old_dir, old_name);
+ // old path should not be an ancestor of the new path
+ var relative = PATH_FS.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(28);
+ }
+ // new path should not be an ancestor of the old path
+ relative = PATH_FS.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== '.') {
+ throw new FS.ErrnoError(55);
+ }
+ // see if the new path already exists
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {
+ // not fatal
+ }
+ // early out if nothing needs to change
+ if (old_node === new_node) {
+ return;
+ }
+ // we'll need to delete the old entry
+ var isdir = FS.isDir(old_node.mode);
+ var errCode = FS.mayDelete(old_dir, old_name, isdir);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ // need delete permissions if we'll be overwriting.
+ // need create permissions if new doesn't already exist.
+ errCode = new_node ?
+ FS.mayDelete(new_dir, new_name, isdir) :
+ FS.mayCreate(new_dir, new_name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
+ throw new FS.ErrnoError(10);
+ }
+ // if we are going to change the parent, check write permissions
+ if (new_dir !== old_dir) {
+ errCode = FS.nodePermissions(old_dir, 'w');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ // remove the node from the lookup hash
+ FS.hashRemoveNode(old_node);
+ // do the underlying fs rename
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ // update old node (we do this here to avoid each backend
+ // needing to)
+ old_node.parent = new_dir;
+ } catch (e) {
+ throw e;
+ } finally {
+ // add the node back to the hash (in case node_ops.rename
+ // changed its name)
+ FS.hashAddNode(old_node);
+ }
+ },
+ // Remove an (empty) directory; mountpoints are protected (10 = EBUSY).
+ rmdir(path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, true);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },
+ // List directory entries (including '.' and '..'); 54 = ENOTDIR when the
+ // backend exposes no readdir op.
+ readdir(path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ var readdir = FS.checkOpExists(node.node_ops.readdir, 54);
+ return readdir(node);
+ },
+ // Remove a non-directory entry.
+ unlink(path) {
+ var lookup = FS.lookupPath(path, { parent: true });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, false);
+ if (errCode) {
+ // According to POSIX, we should map EISDIR to EPERM, but
+ // we instead do what Linux does (and we must, as we use
+ // the musl linux libc).
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },
+ // Return the target string of the symlink at `path` (28 = EINVAL when the
+ // node is not a symlink / backend lacks readlink).
+ readlink(path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(28);
+ }
+ return link.node_ops.readlink(link);
+ },
+ // stat(2)/lstat(2): dontFollow selects lstat semantics.
+ stat(path, dontFollow) {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ var node = lookup.node;
+ var getattr = FS.checkOpExists(node.node_ops.getattr, 63);
+ return getattr(node);
+ },
+ lstat(path) {
+ return FS.stat(path, true);
+ },
+ // chmod by path or by node object; keeps the file-type bits, replaces the
+ // low 12 permission bits, and bumps ctime.
+ chmod(path, mode, dontFollow) {
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ ctime: Date.now(),
+ dontFollow
+ });
+ },
+ lchmod(path, mode) {
+ FS.chmod(path, mode, true);
+ },
+ fchmod(fd, mode) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chmod(stream.node, mode);
+ },
+ // chown by path or node. uid/gid are intentionally ignored (single-user
+ // environment).
+ // NOTE(review): this passes `timestamp`, but MEMFS.node_ops.setattr only
+ // honors mode/atime/mtime/ctime/size, so no time is actually updated for
+ // MEMFS-backed nodes — looks like a leftover from an older setattr
+ // contract; confirm against other backends before changing.
+ chown(path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: !dontFollow });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ timestamp: Date.now(),
+ dontFollow
+ // we ignore the uid / gid for now
+ });
+ },
+ lchown(path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },
+ fchown(fd, uid, gid) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chown(stream.node, uid, gid);
+ },
+ // truncate(2) by path or node; rejects negative lengths (28 = EINVAL),
+ // directories (31 = EISDIR) and non-regular files.
+ // NOTE(review): the `timestamp` key below is ignored by
+ // MEMFS.node_ops.setattr (which honors only mode/atime/mtime/ctime/size),
+ // so truncation does not update mtime/ctime on MEMFS — verify intent.
+ truncate(path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ var node;
+ if (typeof path == 'string') {
+ var lookup = FS.lookupPath(path, { follow: true });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(28);
+ }
+ var errCode = FS.nodePermissions(node, 'w');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ size: len,
+ timestamp: Date.now()
+ });
+ },
+ // ftruncate(2): the stream must be open for writing (28 = EINVAL).
+ ftruncate(fd, len) {
+ var stream = FS.getStreamChecked(fd);
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(28);
+ }
+ FS.truncate(stream.node, len);
+ },
+ // Set access/modification times via the backend's setattr.
+ utime(path, atime, mtime) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ var node = lookup.node;
+ var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
+ setattr(node, {
+ atime: atime,
+ mtime: mtime
+ });
+ },
+ // open(2): accepts a path, a node object, or an fopen-style mode string
+ // for `flags`. Handles O_CREAT/O_EXCL (64/128), O_TRUNC (512),
+ // O_NOFOLLOW (131072) and O_DIRECTORY (65536), then registers and returns
+ // a new stream.
+ open(path, flags, mode = 0o666) {
+ if (path === "") {
+ throw new FS.ErrnoError(44);
+ }
+ flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags;
+ if ((flags & 64)) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ var isDirPath;
+ if (typeof path == 'object') {
+ node = path;
+ } else {
+ isDirPath = path.endsWith("/");
+ // noent_okay makes it so that if the final component of the path
+ // doesn't exist, lookupPath returns `node: undefined`. `path` will be
+ // updated to point to the target of all symlinks.
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072),
+ noent_okay: true
+ });
+ node = lookup.node;
+ path = lookup.path;
+ }
+ // perhaps we need to create the node
+ var created = false;
+ if ((flags & 64)) {
+ if (node) {
+ // if O_CREAT and O_EXCL are set, error out if the node already exists
+ if ((flags & 128)) {
+ throw new FS.ErrnoError(20);
+ }
+ } else if (isDirPath) {
+ throw new FS.ErrnoError(31);
+ } else {
+ // node doesn't exist, try to create it
+ // Ignore the permission bits here to ensure we can `open` this new
+ // file below. We use chmod below the apply the permissions once the
+ // file is open.
+ node = FS.mknod(path, mode | 0o777, 0);
+ created = true;
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(44);
+ }
+ // can't truncate a device
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ // if asked only for a directory, then this must be one
+ if ((flags & 65536) && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ // check permissions, if this is not a file we just created now (it is ok to
+ // create and write to a file with read-only permissions; it is read-only
+ // for later use)
+ if (!created) {
+ var errCode = FS.mayOpen(node, flags);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ // do truncation if necessary
+ if ((flags & 512) && !created) {
+ FS.truncate(node, 0);
+ }
+ // we've already handled these, don't pass down to the underlying vfs
+ flags &= ~(128 | 512 | 131072);
+
+ // register the stream with the filesystem
+ var stream = FS.createStream({
+ node,
+ path: FS.getPath(node), // we want the absolute path to the node
+ flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ // used by the file family libc calls (fopen, fwrite, ferror, etc.)
+ ungotten: [],
+ error: false
+ });
+ // call the new stream's open function
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (created) {
+ FS.chmod(node, mode & 0o777);
+ }
+ if (Module['logReadFiles'] && !(flags & 1)) {
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ }
+ }
+ return stream;
+ },
+ close(stream) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (stream.getdents) stream.getdents = null; // free readdir state
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ stream.fd = null;
+ },
+ isClosed(stream) {
+ return stream.fd === null;
+ },
+ llseek(stream, offset, whence) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(70);
+ }
+ if (whence != 0 && whence != 1 && whence != 2) {
+ throw new FS.ErrnoError(28);
+ }
+ stream.position = stream.stream_ops.llseek(stream, offset, whence);
+ stream.ungotten = [];
+ return stream.position;
+ },
+ read(stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(28);
+ }
+ var seeking = typeof position != 'undefined';
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(28);
+ }
+ if (stream.seekable && stream.flags & 1024) {
+ // seek to the end before writing in append mode
+ FS.llseek(stream, 0, 2);
+ }
+ var seeking = typeof position != 'undefined';
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },
+ allocate(stream, offset, length) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(138);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },
+ mmap(stream, length, position, prot, flags) {
+ // User requests writing to file (prot & PROT_WRITE != 0).
+ // Checking if we have permissions to write to the file unless
+ // MAP_PRIVATE flag is set. According to POSIX spec it is possible
+ // to write to file opened in read-only mode with MAP_PRIVATE flag,
+ // as all modifications will be visible only in the memory of
+ // the current process.
+ if ((prot & 2) !== 0
+ && (flags & 2) === 0
+ && (stream.flags & 2097155) !== 2) {
+ throw new FS.ErrnoError(2);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(2);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(43);
+ }
+ if (!length) {
+ throw new FS.ErrnoError(28);
+ }
+ return stream.stream_ops.mmap(stream, length, position, prot, flags);
+ },
+ msync(stream, buffer, offset, length, mmapFlags) {
+ if (!stream.stream_ops.msync) {
+ return 0;
+ }
+ return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags);
+ },
+ ioctl(stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(59);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },
+ readFile(path, opts = {}) {
+ opts.flags = opts.flags || 0;
+ opts.encoding = opts.encoding || 'binary';
+ if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
+ throw new Error(`Invalid encoding type "${opts.encoding}"`);
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === 'utf8') {
+ ret = UTF8ArrayToString(buf);
+ } else if (opts.encoding === 'binary') {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },
+ writeFile(path, data, opts = {}) {
+ opts.flags = opts.flags || 577;
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (typeof data == 'string') {
+ var buf = new Uint8Array(lengthBytesUTF8(data)+1);
+ var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length);
+ FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn);
+ } else if (ArrayBuffer.isView(data)) {
+ FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
+ } else {
+ throw new Error('Unsupported data type');
+ }
+ FS.close(stream);
+ },
+ cwd:() => FS.currentPath,
+ chdir(path) {
+ var lookup = FS.lookupPath(path, { follow: true });
+ if (lookup.node === null) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ var errCode = FS.nodePermissions(lookup.node, 'x');
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ FS.currentPath = lookup.path;
+ },
+ createDefaultDirectories() {
+ FS.mkdir('/tmp');
+ FS.mkdir('/home');
+ FS.mkdir('/home/web_user');
+ },
+ createDefaultDevices() {
+ // create /dev
+ FS.mkdir('/dev');
+ // setup /dev/null
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: () => 0,
+ write: (stream, buffer, offset, length, pos) => length,
+ llseek: () => 0,
+ });
+ FS.mkdev('/dev/null', FS.makedev(1, 3));
+ // setup /dev/tty and /dev/tty1
+ // stderr needs to print output using err() rather than out()
+ // so we register a second tty just for it.
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev('/dev/tty', FS.makedev(5, 0));
+ FS.mkdev('/dev/tty1', FS.makedev(6, 0));
+ // setup /dev/[u]random
+ // use a buffer to avoid overhead of individual crypto calls per byte
+ var randomBuffer = new Uint8Array(1024), randomLeft = 0;
+ var randomByte = () => {
+ if (randomLeft === 0) {
+ randomFill(randomBuffer);
+ randomLeft = randomBuffer.byteLength;
+ }
+ return randomBuffer[--randomLeft];
+ };
+ FS.createDevice('/dev', 'random', randomByte);
+ FS.createDevice('/dev', 'urandom', randomByte);
+ // we're not going to emulate the actual shm device,
+ // just create the tmp dirs that reside in it commonly
+ FS.mkdir('/dev/shm');
+ FS.mkdir('/dev/shm/tmp');
+ },
+ createSpecialDirectories() {
+ // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the
+ // name of the stream for fd 6 (see test_unistd_ttyname)
+ FS.mkdir('/proc');
+ var proc_self = FS.mkdir('/proc/self');
+ FS.mkdir('/proc/self/fd');
+ FS.mount({
+ mount() {
+ var node = FS.createNode(proc_self, 'fd', 16895, 73);
+ node.stream_ops = {
+ llseek: MEMFS.stream_ops.llseek,
+ };
+ node.node_ops = {
+ lookup(parent, name) {
+ var fd = +name;
+ var stream = FS.getStreamChecked(fd);
+ var ret = {
+ parent: null,
+ mount: { mountpoint: 'fake' },
+ node_ops: { readlink: () => stream.path },
+ id: fd + 1,
+ };
+ ret.parent = ret; // make it look like a simple root node
+ return ret;
+ },
+ readdir() {
+ return Array.from(FS.streams.entries())
+ .filter(([k, v]) => v)
+ .map(([k, v]) => k.toString());
+ }
+ };
+ return node;
+ }
+ }, {}, '/proc/self/fd');
+ },
+ createStandardStreams(input, output, error) {
+ // TODO deprecate the old functionality of a single
+ // input / output callback and that utilizes FS.createDevice
+ // and instead require a unique set of stream ops
+
+ // by default, we symlink the standard streams to the
+ // default tty devices. however, if the standard streams
+ // have been overwritten we create a unique device for
+ // them instead.
+ if (input) {
+ FS.createDevice('/dev', 'stdin', input);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdin');
+ }
+ if (output) {
+ FS.createDevice('/dev', 'stdout', null, output);
+ } else {
+ FS.symlink('/dev/tty', '/dev/stdout');
+ }
+ if (error) {
+ FS.createDevice('/dev', 'stderr', null, error);
+ } else {
+ FS.symlink('/dev/tty1', '/dev/stderr');
+ }
+
+ // open default streams for the stdin, stdout and stderr devices
+ var stdin = FS.open('/dev/stdin', 0);
+ var stdout = FS.open('/dev/stdout', 1);
+ var stderr = FS.open('/dev/stderr', 1);
+ },
+ staticInit() {
+ FS.nameTable = new Array(4096);
+
+ FS.mount(MEMFS, {}, '/');
+
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ FS.createSpecialDirectories();
+
+ FS.filesystems = {
+ 'MEMFS': MEMFS,
+ };
+ },
+ init(input, output, error) {
+ FS.initialized = true;
+
+ // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
+ input ??= Module['stdin'];
+ output ??= Module['stdout'];
+ error ??= Module['stderr'];
+
+ FS.createStandardStreams(input, output, error);
+ },
+ quit() {
+ FS.initialized = false;
+ // force-flush all streams, so we get musl std streams printed out
+ // close all of our streams
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },
+ findObject(path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (!ret.exists) {
+ return null;
+ }
+ return ret.object;
+ },
+ analyzePath(path, dontResolveLastLink) {
+ // operate from within the context of the symlink's target
+ try {
+ var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ path = lookup.path;
+ } catch (e) {
+ }
+ var ret = {
+ isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
+ parentExists: false, parentPath: null, parentObject: null
+ };
+ try {
+ var lookup = FS.lookupPath(path, { parent: true });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === '/';
+ } catch (e) {
+ ret.error = e.errno;
+ };
+ return ret;
+ },
+ createPath(parent, path, canRead, canWrite) {
+ parent = typeof parent == 'string' ? parent : FS.getPath(parent);
+ var parts = path.split('/').reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {
+ // ignore EEXIST
+ }
+ parent = current;
+ }
+ return current;
+ },
+ createFile(parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS_getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },
+ createDataFile(parent, name, data, canRead, canWrite, canOwn) {
+ var path = name;
+ if (parent) {
+ parent = typeof parent == 'string' ? parent : FS.getPath(parent);
+ path = name ? PATH.join2(parent, name) : parent;
+ }
+ var mode = FS_getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data == 'string') {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ // make sure we can write to the file
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 577);
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ },
+ createDevice(parent, name, input, output) {
+ var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
+ var mode = FS_getMode(!!input, !!output);
+ FS.createDevice.major ??= 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ // Create a fake device that a set of stream ops to emulate
+ // the old behavior.
+ FS.registerDevice(dev, {
+ open(stream) {
+ stream.seekable = false;
+ },
+ close(stream) {
+ // flush any pending line data
+ if (output?.buffer?.length) {
+ output(10);
+ }
+ },
+ read(stream, buffer, offset, length, pos /* ignored */) {
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(6);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset+i] = result;
+ }
+ if (bytesRead) {
+ stream.node.atime = Date.now();
+ }
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset+i]);
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ }
+ if (length) {
+ stream.node.mtime = stream.node.ctime = Date.now();
+ }
+ return i;
+ }
+ });
+ return FS.mkdev(path, mode, dev);
+ },
+ forceLoadFile(obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
+ if (typeof XMLHttpRequest != 'undefined') {
+ throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
+ } else { // Command-line.
+ try {
+ obj.contents = readBinary(obj.url);
+ obj.usedBytes = obj.contents.length;
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ }
+ },
+ createLazyFile(parent, name, url, canRead, canWrite) {
+ // Lazy chunked Uint8Array (implements get and length from Uint8Array).
+ // Actual getting is abstracted away for eventual reuse.
+ class LazyUint8Array {
+ lengthKnown = false;
+ chunks = []; // Loaded chunks. Index is the chunk number
+ get(idx) {
+ if (idx > this.length-1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = (idx / this.chunkSize)|0;
+ return this.getter(chunkNum)[chunkOffset];
+ }
+ setDataGetter(getter) {
+ this.getter = getter;
+ }
+ cacheLength() {
+ // Find length
+ var xhr = new XMLHttpRequest();
+ xhr.open('HEAD', url, false);
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
+ var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip";
+
+ var chunkSize = 1024*1024; // Chunk size in bytes
+
+ if (!hasByteServing) chunkSize = datalength;
+
+ // Function to get a range from the remote URL.
+ var doXHR = (from, to) => {
+ if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!");
+ if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!");
+
+ // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, false);
+ if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+
+ // Some hints to the browser that we want binary data.
+ xhr.responseType = 'arraybuffer';
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType('text/plain; charset=x-user-defined');
+ }
+
+ xhr.send(null);
+ if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status);
+ if (xhr.response !== undefined) {
+ return new Uint8Array(/** @type{Array} */(xhr.response || []));
+ }
+ return intArrayFromString(xhr.responseText || '', true);
+ };
+ var lazyArray = this;
+ lazyArray.setDataGetter((chunkNum) => {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum+1) * chunkSize - 1; // including this byte
+ end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
+ if (typeof lazyArray.chunks[chunkNum] == 'undefined') {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!');
+ return lazyArray.chunks[chunkNum];
+ });
+
+ if (usesGzip || !datalength) {
+ // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length
+ chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file
+ datalength = this.getter(0).length;
+ chunkSize = datalength;
+ out("LazyFiles on gzip forces download of the whole file when length is accessed");
+ }
+
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ }
+ get length() {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ }
+ get chunkSize() {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ }
+ }
+
+ if (typeof XMLHttpRequest != 'undefined') {
+ if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc';
+ var lazyArray = new LazyUint8Array();
+ var properties = { isDevice: false, contents: lazyArray };
+ } else {
+ var properties = { isDevice: false, url: url };
+ }
+
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ // This is a total hack, but I want to get this lazy file code out of the
+ // core of MEMFS. If we want to keep this lazy file concept I feel it should
+ // be its own thin LAZYFS proxying calls to MEMFS.
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ // Add a function that defers querying the file size until it is asked the first time.
+ Object.defineProperties(node, {
+ usedBytes: {
+ get: function() { return this.contents.length; }
+ }
+ });
+ // override each stream op with one that tries to force load the lazy file first
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach((key) => {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = (...args) => {
+ FS.forceLoadFile(node);
+ return fn(...args);
+ };
+ });
+ function writeChunks(stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length)
+ return 0;
+ var size = Math.min(contents.length - position, length);
+ if (contents.slice) { // normal array
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ }
+ // use a custom read function
+ stream_ops.read = (stream, buffer, offset, length, position) => {
+ FS.forceLoadFile(node);
+ return writeChunks(stream, buffer, offset, length, position)
+ };
+ // use a custom mmap function
+ stream_ops.mmap = (stream, length, position, prot, flags) => {
+ FS.forceLoadFile(node);
+ var ptr = mmapAlloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(48);
+ }
+ writeChunks(stream, HEAP8, ptr, length, position);
+ return { ptr, allocated: true };
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },
+ };
+
+ var SYSCALLS = {
+ DEFAULT_POLLMASK:5,
+ calculateAt(dirfd, path, allowEmpty) {
+ if (PATH.isAbs(path)) {
+ return path;
+ }
+ // relative path
+ var dir;
+ if (dirfd === -100) {
+ dir = FS.cwd();
+ } else {
+ var dirstream = SYSCALLS.getStreamFromFD(dirfd);
+ dir = dirstream.path;
+ }
+ if (path.length == 0) {
+ if (!allowEmpty) {
+ throw new FS.ErrnoError(44);;
+ }
+ return dir;
+ }
+ return dir + '/' + path;
+ },
+ writeStat(buf, stat) {
+ HEAP32[((buf)>>2)] = stat.dev;
+ HEAP32[(((buf)+(4))>>2)] = stat.mode;
+ HEAPU32[(((buf)+(8))>>2)] = stat.nlink;
+ HEAP32[(((buf)+(12))>>2)] = stat.uid;
+ HEAP32[(((buf)+(16))>>2)] = stat.gid;
+ HEAP32[(((buf)+(20))>>2)] = stat.rdev;
+ HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size);
+ HEAP32[(((buf)+(32))>>2)] = 4096;
+ HEAP32[(((buf)+(36))>>2)] = stat.blocks;
+ var atime = stat.atime.getTime();
+ var mtime = stat.mtime.getTime();
+ var ctime = stat.ctime.getTime();
+ HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000));
+ HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000;
+ HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000));
+ HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000;
+ HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000));
+ HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000;
+ HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino);
+ return 0;
+ },
+ writeStatFs(buf, stats) {
+ HEAP32[(((buf)+(4))>>2)] = stats.bsize;
+ HEAP32[(((buf)+(40))>>2)] = stats.bsize;
+ HEAP32[(((buf)+(8))>>2)] = stats.blocks;
+ HEAP32[(((buf)+(12))>>2)] = stats.bfree;
+ HEAP32[(((buf)+(16))>>2)] = stats.bavail;
+ HEAP32[(((buf)+(20))>>2)] = stats.files;
+ HEAP32[(((buf)+(24))>>2)] = stats.ffree;
+ HEAP32[(((buf)+(28))>>2)] = stats.fsid;
+ HEAP32[(((buf)+(44))>>2)] = stats.flags; // ST_NOSUID
+ HEAP32[(((buf)+(36))>>2)] = stats.namelen;
+ },
+ doMsync(addr, stream, len, flags, offset) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ if (flags & 2) {
+ // MAP_PRIVATE calls need not to be synced back to underlying fs
+ return 0;
+ }
+ var buffer = HEAPU8.slice(addr, addr + len);
+ FS.msync(stream, buffer, offset, len, flags);
+ },
+ getStreamFromFD(fd) {
+ var stream = FS.getStreamChecked(fd);
+ return stream;
+ },
+ varargs:undefined,
+ getStr(ptr) {
+ var ret = UTF8ToString(ptr);
+ return ret;
+ },
+ };
+ function ___syscall_fcntl64(fd, cmd, varargs) {
+ SYSCALLS.varargs = varargs;
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ switch (cmd) {
+ case 0: {
+ var arg = syscallGetVarargI();
+ if (arg < 0) {
+ return -28;
+ }
+ while (FS.streams[arg]) {
+ arg++;
+ }
+ var newStream;
+ newStream = FS.dupStream(stream, arg);
+ return newStream.fd;
+ }
+ case 1:
+ case 2:
+ return 0; // FD_CLOEXEC makes no sense for a single process.
+ case 3:
+ return stream.flags;
+ case 4: {
+ var arg = syscallGetVarargI();
+ stream.flags |= arg;
+ return 0;
+ }
+ case 12: {
+ var arg = syscallGetVarargP();
+ var offset = 0;
+ // We're always unlocked.
+ HEAP16[(((arg)+(offset))>>1)] = 2;
+ return 0;
+ }
+ case 13:
+ case 14:
+ return 0; // Pretend that the locking is successful.
+ }
+ return -28;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_fstat64(fd, buf) {
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ return SYSCALLS.writeStat(buf, FS.stat(stream.path));
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ var stringToUTF8 = (str, outPtr, maxBytesToWrite) => {
+ return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite);
+ };
+
+ function ___syscall_getdents64(fd, dirp, count) {
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd)
+ stream.getdents ||= FS.readdir(stream.path);
+
+ var struct_size = 280;
+ var pos = 0;
+ var off = FS.llseek(stream, 0, 1);
+
+ var startIdx = Math.floor(off / struct_size);
+ var endIdx = Math.min(stream.getdents.length, startIdx + Math.floor(count/struct_size))
+ for (var idx = startIdx; idx < endIdx; idx++) {
+ var id;
+ var type;
+ var name = stream.getdents[idx];
+ if (name === '.') {
+ id = stream.node.id;
+ type = 4; // DT_DIR
+ }
+ else if (name === '..') {
+ var lookup = FS.lookupPath(stream.path, { parent: true });
+ id = lookup.node.id;
+ type = 4; // DT_DIR
+ }
+ else {
+ var child;
+ try {
+ child = FS.lookupNode(stream.node, name);
+ } catch (e) {
+ // If the entry is not a directory, file, or symlink, nodefs
+ // lookupNode will raise EINVAL. Skip these and continue.
+ if (e?.errno === 28) {
+ continue;
+ }
+ throw e;
+ }
+ id = child.id;
+ type = FS.isChrdev(child.mode) ? 2 : // DT_CHR, character device.
+ FS.isDir(child.mode) ? 4 : // DT_DIR, directory.
+ FS.isLink(child.mode) ? 10 : // DT_LNK, symbolic link.
+ 8; // DT_REG, regular file.
+ }
+ HEAP64[((dirp + pos)>>3)] = BigInt(id);
+ HEAP64[(((dirp + pos)+(8))>>3)] = BigInt((idx + 1) * struct_size);
+ HEAP16[(((dirp + pos)+(16))>>1)] = 280;
+ HEAP8[(dirp + pos)+(18)] = type;
+ stringToUTF8(name, dirp + pos + 19, 256);
+ pos += struct_size;
+ }
+ FS.llseek(stream, idx * struct_size, 0);
+ return pos;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+
+ function ___syscall_ioctl(fd, op, varargs) {
+ SYSCALLS.varargs = varargs;
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ switch (op) {
+ case 21509: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+ case 21505: {
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tcgets) {
+ var termios = stream.tty.ops.ioctl_tcgets(stream);
+ var argp = syscallGetVarargP();
+ HEAP32[((argp)>>2)] = termios.c_iflag || 0;
+ HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0;
+ HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0;
+ HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0;
+ for (var i = 0; i < 32; i++) {
+ HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0;
+ }
+ return 0;
+ }
+ return 0;
+ }
+ case 21510:
+ case 21511:
+ case 21512: {
+ if (!stream.tty) return -59;
+ return 0; // no-op, not actually adjusting terminal settings
+ }
+ case 21506:
+ case 21507:
+ case 21508: {
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tcsets) {
+ var argp = syscallGetVarargP();
+ var c_iflag = HEAP32[((argp)>>2)];
+ var c_oflag = HEAP32[(((argp)+(4))>>2)];
+ var c_cflag = HEAP32[(((argp)+(8))>>2)];
+ var c_lflag = HEAP32[(((argp)+(12))>>2)];
+ var c_cc = []
+ for (var i = 0; i < 32; i++) {
+ c_cc.push(HEAP8[(argp + i)+(17)]);
+ }
+ return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc });
+ }
+ return 0; // no-op, not actually adjusting terminal settings
+ }
+ case 21519: {
+ if (!stream.tty) return -59;
+ var argp = syscallGetVarargP();
+ HEAP32[((argp)>>2)] = 0;
+ return 0;
+ }
+ case 21520: {
+ if (!stream.tty) return -59;
+ return -28; // not supported
+ }
+ case 21531: {
+ var argp = syscallGetVarargP();
+ return FS.ioctl(stream, op, argp);
+ }
+ case 21523: {
+ // TODO: in theory we should write to the winsize struct that gets
+ // passed in, but for now musl doesn't read anything on it
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tiocgwinsz) {
+ var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty);
+ var argp = syscallGetVarargP();
+ HEAP16[((argp)>>1)] = winsize[0];
+ HEAP16[(((argp)+(2))>>1)] = winsize[1];
+ }
+ return 0;
+ }
+ case 21524: {
+ // TODO: technically, this ioctl call should change the window size.
+ // but, since emscripten doesn't have any concept of a terminal window
+ // yet, we'll just silently throw it away as we do TIOCGWINSZ
+ if (!stream.tty) return -59;
+ return 0;
+ }
+ case 21515: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+ default: return -28; // not supported
+ }
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_lstat64(path, buf) {
+ try {
+
+ path = SYSCALLS.getStr(path);
+ return SYSCALLS.writeStat(buf, FS.lstat(path));
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_newfstatat(dirfd, path, buf, flags) {
+ try {
+
+ path = SYSCALLS.getStr(path);
+ var nofollow = flags & 256;
+ var allowEmpty = flags & 4096;
+ flags = flags & (~6400);
+ path = SYSCALLS.calculateAt(dirfd, path, allowEmpty);
+ return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path));
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+
+ function ___syscall_openat(dirfd, path, flags, varargs) {
+ SYSCALLS.varargs = varargs;
+ try {
+
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ var mode = varargs ? syscallGetVarargI() : 0;
+ return FS.open(path, flags, mode).fd;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_stat64(path, buf) {
+ try {
+
+ path = SYSCALLS.getStr(path);
+ return SYSCALLS.writeStat(buf, FS.stat(path));
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ }
+
+ var __abort_js = () =>
+ abort('');
+
+
+ var INT53_MAX = 9007199254740992;
+
+ var INT53_MIN = -9007199254740992;
+ var bigintToI53Checked = (num) => (num < INT53_MIN || num > INT53_MAX) ? NaN : Number(num);
+ function __munmap_js(addr, len, prot, flags, fd, offset) {
+ offset = bigintToI53Checked(offset);
+
+
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ if (prot & 2) {
+ SYSCALLS.doMsync(addr, stream, len, flags, offset);
+ }
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return -e.errno;
+ }
+ ;
+ }
+
+ var __tzset_js = (timezone, daylight, std_name, dst_name) => {
+ // TODO: Use (malleable) environment variables instead of system settings.
+ var currentYear = new Date().getFullYear();
+ var winter = new Date(currentYear, 0, 1);
+ var summer = new Date(currentYear, 6, 1);
+ var winterOffset = winter.getTimezoneOffset();
+ var summerOffset = summer.getTimezoneOffset();
+
+ // Local standard timezone offset. Local standard time is not adjusted for
+ // daylight savings. This code uses the fact that getTimezoneOffset returns
+ // a greater value during Standard Time versus Daylight Saving Time (DST).
+ // Thus it determines the expected output during Standard Time, and it
+ // compares whether the output of the given date the same (Standard) or less
+ // (DST).
+ var stdTimezoneOffset = Math.max(winterOffset, summerOffset);
+
+ // timezone is specified as seconds west of UTC ("The external variable
+ // `timezone` shall be set to the difference, in seconds, between
+ // Coordinated Universal Time (UTC) and local standard time."), the same
+ // as returned by stdTimezoneOffset.
+ // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html
+ HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60;
+
+ HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset);
+
+ var extractZone = (timezoneOffset) => {
+ // Why inverse sign?
+ // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset
+ var sign = timezoneOffset >= 0 ? "-" : "+";
+
+ var absOffset = Math.abs(timezoneOffset)
+ var hours = String(Math.floor(absOffset / 60)).padStart(2, "0");
+ var minutes = String(absOffset % 60).padStart(2, "0");
+
+ return `UTC${sign}${hours}${minutes}`;
+ }
+
+ var winterName = extractZone(winterOffset);
+ var summerName = extractZone(summerOffset);
+ if (summerOffset < winterOffset) {
+ // Northern hemisphere
+ stringToUTF8(winterName, std_name, 17);
+ stringToUTF8(summerName, dst_name, 17);
+ } else {
+ stringToUTF8(winterName, dst_name, 17);
+ stringToUTF8(summerName, std_name, 17);
+ }
+ };
+
+ var _emscripten_get_now = () => performance.now();
+
+ var _emscripten_date_now = () => Date.now();
+
+ var nowIsMonotonic = 1;
+
+ var checkWasiClock = (clock_id) => clock_id >= 0 && clock_id <= 3;
+
+ function _clock_time_get(clk_id, ignored_precision, ptime) {
+ ignored_precision = bigintToI53Checked(ignored_precision);
+
+
+ if (!checkWasiClock(clk_id)) {
+ return 28;
+ }
+ var now;
+ // all wasi clocks but realtime are monotonic
+ if (clk_id === 0) {
+ now = _emscripten_date_now();
+ } else if (nowIsMonotonic) {
+ now = _emscripten_get_now();
+ } else {
+ return 52;
+ }
+ // "now" is in ms, and wasi times are in ns.
+ var nsec = Math.round(now * 1000 * 1000);
+ HEAP64[((ptime)>>3)] = BigInt(nsec);
+ return 0;
+ ;
+ }
+
+
+ var getHeapMax = () =>
+ // Stay one Wasm page short of 4GB: while e.g. Chrome is able to allocate
+ // full 4GB Wasm memories, the size will wrap back to 0 bytes in Wasm side
+ // for any code that deals with heap sizes, which would require special
+ // casing all heap size related code to treat 0 specially.
+ 2147483648;
+
+
+ var growMemory = (size) => {
+ var b = wasmMemory.buffer;
+ var pages = ((size - b.byteLength + 65535) / 65536) | 0;
+ try {
+ // round size grow request up to wasm page size (fixed 64KB per spec)
+ wasmMemory.grow(pages); // .grow() takes a delta compared to the previous size
+ updateMemoryViews();
+ return 1 /*success*/;
+ } catch(e) {
+ }
+ // implicit 0 return to save code size (caller will cast "undefined" into 0
+ // anyhow)
+ };
+ var _emscripten_resize_heap = (requestedSize) => {
+ var oldSize = HEAPU8.length;
+ // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
+ requestedSize >>>= 0;
+ // With multithreaded builds, races can happen (another thread might increase the size
+ // in between), so return a failure, and let the caller retry.
+
+ // Memory resize rules:
+ // 1. Always increase heap size to at least the requested size, rounded up
+ // to next page multiple.
+ // 2a. If MEMORY_GROWTH_LINEAR_STEP == -1, excessively resize the heap
+ // geometrically: increase the heap size according to
+ // MEMORY_GROWTH_GEOMETRIC_STEP factor (default +20%), At most
+ // overreserve by MEMORY_GROWTH_GEOMETRIC_CAP bytes (default 96MB).
+ // 2b. If MEMORY_GROWTH_LINEAR_STEP != -1, excessively resize the heap
+ // linearly: increase the heap size by at least
+ // MEMORY_GROWTH_LINEAR_STEP bytes.
+ // 3. Max size for the heap is capped at 2048MB-WASM_PAGE_SIZE, or by
+ // MAXIMUM_MEMORY, or by ASAN limit, depending on which is smallest
+ // 4. If we were unable to allocate as much memory, it may be due to
+ // over-eager decision to excessively reserve due to (3) above.
+ // Hence if an allocation fails, cut down on the amount of excess
+ // growth, in an attempt to succeed to perform a smaller allocation.
+
+ // A limit is set for how much we can grow. We should not exceed that
+ // (the wasm binary specifies it, so if we tried, we'd fail anyhow).
+ var maxHeapSize = getHeapMax();
+ if (requestedSize > maxHeapSize) {
+ return false;
+ }
+
+ // Loop through potential heap size increases. If we attempt a too eager
+ // reservation that fails, cut down on the attempted size and reserve a
+ // smaller bump instead. (max 3 times, chosen somewhat arbitrarily)
+ for (var cutDown = 1; cutDown <= 4; cutDown *= 2) {
+ var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown); // ensure geometric growth
+ // but limit overreserving (default to capping at +96MB overgrowth at most)
+ overGrownHeapSize = Math.min(overGrownHeapSize, requestedSize + 100663296 );
+
+ var newSize = Math.min(maxHeapSize, alignMemory(Math.max(requestedSize, overGrownHeapSize), 65536));
+
+ var replacement = growMemory(newSize);
+ if (replacement) {
+
+ return true;
+ }
+ }
+ return false;
+ };
+
+ var ENV = {
+ };
+
+ var getExecutableName = () => thisProgram || './this.program';
+ var getEnvStrings = () => {
+ if (!getEnvStrings.strings) {
+ // Default values.
+ // Browser language detection #8751
+ var lang = ((typeof navigator == 'object' && navigator.languages && navigator.languages[0]) || 'C').replace('-', '_') + '.UTF-8';
+ var env = {
+ 'USER': 'web_user',
+ 'LOGNAME': 'web_user',
+ 'PATH': '/',
+ 'PWD': '/',
+ 'HOME': '/home/web_user',
+ 'LANG': lang,
+ '_': getExecutableName()
+ };
+ // Apply the user-provided values, if any.
+ for (var x in ENV) {
+ // x is a key in ENV; if ENV[x] is undefined, that means it was
+ // explicitly set to be so. We allow user code to do that to
+ // force variables with default values to remain unset.
+ if (ENV[x] === undefined) delete env[x];
+ else env[x] = ENV[x];
+ }
+ var strings = [];
+ for (var x in env) {
+ strings.push(`${x}=${env[x]}`);
+ }
+ getEnvStrings.strings = strings;
+ }
+ return getEnvStrings.strings;
+ };
+
+ var stringToAscii = (str, buffer) => {
+ for (var i = 0; i < str.length; ++i) {
+ HEAP8[buffer++] = str.charCodeAt(i);
+ }
+ // Null-terminate the string
+ HEAP8[buffer] = 0;
+ };
+ var _environ_get = (__environ, environ_buf) => {
+ var bufSize = 0;
+ getEnvStrings().forEach((string, i) => {
+ var ptr = environ_buf + bufSize;
+ HEAPU32[(((__environ)+(i*4))>>2)] = ptr;
+ stringToAscii(string, ptr);
+ bufSize += string.length + 1;
+ });
+ return 0;
+ };
+
+ var _environ_sizes_get = (penviron_count, penviron_buf_size) => {
+ var strings = getEnvStrings();
+ HEAPU32[((penviron_count)>>2)] = strings.length;
+ var bufSize = 0;
+ strings.forEach((string) => bufSize += string.length + 1);
+ HEAPU32[((penviron_buf_size)>>2)] = bufSize;
+ return 0;
+ };
+
+ function _fd_close(fd) {
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ FS.close(stream);
+ return 0;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return e.errno;
+ }
+ }
+
+ /** @param {number=} offset */
+ var doReadv = (stream, iov, iovcnt, offset) => {
+ var ret = 0;
+ for (var i = 0; i < iovcnt; i++) {
+ var ptr = HEAPU32[((iov)>>2)];
+ var len = HEAPU32[(((iov)+(4))>>2)];
+ iov += 8;
+ var curr = FS.read(stream, HEAP8, ptr, len, offset);
+ if (curr < 0) return -1;
+ ret += curr;
+ if (curr < len) break; // nothing more to read
+ if (typeof offset != 'undefined') {
+ offset += curr;
+ }
+ }
+ return ret;
+ };
+
+ function _fd_read(fd, iov, iovcnt, pnum) {
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doReadv(stream, iov, iovcnt);
+ HEAPU32[((pnum)>>2)] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return e.errno;
+ }
+ }
+
+
+ function _fd_seek(fd, offset, whence, newOffset) {
+ offset = bigintToI53Checked(offset);
+
+
+ try {
+
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ FS.llseek(stream, offset, whence);
+ HEAP64[((newOffset)>>3)] = BigInt(stream.position);
+ if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state
+ return 0;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return e.errno;
+ }
+ ;
+ }
+
+ /** @param {number=} offset */
+ var doWritev = (stream, iov, iovcnt, offset) => {
+ var ret = 0;
+ for (var i = 0; i < iovcnt; i++) {
+ var ptr = HEAPU32[((iov)>>2)];
+ var len = HEAPU32[(((iov)+(4))>>2)];
+ iov += 8;
+ var curr = FS.write(stream, HEAP8, ptr, len, offset);
+ if (curr < 0) return -1;
+ ret += curr;
+ if (curr < len) {
+ // No more space to write.
+ break;
+ }
+ if (typeof offset != 'undefined') {
+ offset += curr;
+ }
+ }
+ return ret;
+ };
+
+ function _fd_write(fd, iov, iovcnt, pnum) {
+ try {
+
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doWritev(stream, iov, iovcnt);
+ HEAPU32[((pnum)>>2)] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
+ return e.errno;
+ }
+ }
+
+ var _llvm_eh_typeid_for = (type) => type;
+
+ var wasmTableMirror = [];
+
+ /** @type {WebAssembly.Table} */
+ var wasmTable;
+ var getWasmTableEntry = (funcPtr) => {
+ var func = wasmTableMirror[funcPtr];
+ if (!func) {
+ if (funcPtr >= wasmTableMirror.length) wasmTableMirror.length = funcPtr + 1;
+ /** @suppress {checkTypes} */
+ wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr);
+ }
+ return func;
+ };
+
+ var getCFunc = (ident) => {
+ var func = Module['_' + ident]; // closure exported function
+ return func;
+ };
+
+ var writeArrayToMemory = (array, buffer) => {
+ HEAP8.set(array, buffer);
+ };
+
+
+
+ var stackAlloc = (sz) => __emscripten_stack_alloc(sz);
+ var stringToUTF8OnStack = (str) => {
+ var size = lengthBytesUTF8(str) + 1;
+ var ret = stackAlloc(size);
+ stringToUTF8(str, ret, size);
+ return ret;
+ };
+
+
+
+
+
+ /**
+ * @param {string|null=} returnType
+ * @param {Array=} argTypes
+ * @param {Arguments|Array=} args
+ * @param {Object=} opts
+ */
+ var ccall = (ident, returnType, argTypes, args, opts) => {
+ // For fast lookup of conversion functions
+ var toC = {
+ 'string': (str) => {
+ var ret = 0;
+ if (str !== null && str !== undefined && str !== 0) { // null string
+ ret = stringToUTF8OnStack(str);
+ }
+ return ret;
+ },
+ 'array': (arr) => {
+ var ret = stackAlloc(arr.length);
+ writeArrayToMemory(arr, ret);
+ return ret;
+ }
+ };
+
+ function convertReturnValue(ret) {
+ if (returnType === 'string') {
+ return UTF8ToString(ret);
+ }
+ if (returnType === 'boolean') return Boolean(ret);
+ return ret;
+ }
+
+ var func = getCFunc(ident);
+ var cArgs = [];
+ var stack = 0;
+ if (args) {
+ for (var i = 0; i < args.length; i++) {
+ var converter = toC[argTypes[i]];
+ if (converter) {
+ if (stack === 0) stack = stackSave();
+ cArgs[i] = converter(args[i]);
+ } else {
+ cArgs[i] = args[i];
+ }
+ }
+ }
+ var ret = func(...cArgs);
+ function onDone(ret) {
+ if (stack !== 0) stackRestore(stack);
+ return convertReturnValue(ret);
+ }
+
+ ret = onDone(ret);
+ return ret;
+ };
+
+
+
+ /**
+ * @param {string=} returnType
+ * @param {Array=} argTypes
+ * @param {Object=} opts
+ */
+ var cwrap = (ident, returnType, argTypes, opts) => {
+ // When the function takes numbers and returns a number, we can just return
+ // the original function
+ var numericArgs = !argTypes || argTypes.every((type) => type === 'number' || type === 'boolean');
+ var numericRet = returnType !== 'string';
+ if (numericRet && numericArgs && !opts) {
+ return getCFunc(ident);
+ }
+ return (...args) => ccall(ident, returnType, argTypes, args, opts);
+ };
+
+ FS.createPreloadedFile = FS_createPreloadedFile;
+ FS.staticInit();
+ // Set module methods based on EXPORTED_RUNTIME_METHODS
+ ;
+
+ // This error may happen quite a bit. To avoid overhead we reuse it (and
+ // suffer a lack of stack info).
+ MEMFS.doesNotExistError = new FS.ErrnoError(44);
+ /** @suppress {checkTypes} */
+ MEMFS.doesNotExistError.stack = '';
+ ;
+var wasmImports = {
+ /** @export */
+ __assert_fail: ___assert_fail,
+ /** @export */
+ __cxa_begin_catch: ___cxa_begin_catch,
+ /** @export */
+ __cxa_end_catch: ___cxa_end_catch,
+ /** @export */
+ __cxa_find_matching_catch_2: ___cxa_find_matching_catch_2,
+ /** @export */
+ __cxa_find_matching_catch_3: ___cxa_find_matching_catch_3,
+ /** @export */
+ __cxa_rethrow: ___cxa_rethrow,
+ /** @export */
+ __cxa_throw: ___cxa_throw,
+ /** @export */
+ __cxa_uncaught_exceptions: ___cxa_uncaught_exceptions,
+ /** @export */
+ __resumeException: ___resumeException,
+ /** @export */
+ __syscall_fcntl64: ___syscall_fcntl64,
+ /** @export */
+ __syscall_fstat64: ___syscall_fstat64,
+ /** @export */
+ __syscall_getdents64: ___syscall_getdents64,
+ /** @export */
+ __syscall_ioctl: ___syscall_ioctl,
+ /** @export */
+ __syscall_lstat64: ___syscall_lstat64,
+ /** @export */
+ __syscall_newfstatat: ___syscall_newfstatat,
+ /** @export */
+ __syscall_openat: ___syscall_openat,
+ /** @export */
+ __syscall_stat64: ___syscall_stat64,
+ /** @export */
+ _abort_js: __abort_js,
+ /** @export */
+ _munmap_js: __munmap_js,
+ /** @export */
+ _tzset_js: __tzset_js,
+ /** @export */
+ clock_time_get: _clock_time_get,
+ /** @export */
+ emscripten_date_now: _emscripten_date_now,
+ /** @export */
+ emscripten_resize_heap: _emscripten_resize_heap,
+ /** @export */
+ environ_get: _environ_get,
+ /** @export */
+ environ_sizes_get: _environ_sizes_get,
+ /** @export */
+ fd_close: _fd_close,
+ /** @export */
+ fd_read: _fd_read,
+ /** @export */
+ fd_seek: _fd_seek,
+ /** @export */
+ fd_write: _fd_write,
+ /** @export */
+ invoke_diii,
+ /** @export */
+ invoke_fiii,
+ /** @export */
+ invoke_i,
+ /** @export */
+ invoke_ii,
+ /** @export */
+ invoke_iii,
+ /** @export */
+ invoke_iiii,
+ /** @export */
+ invoke_iiiii,
+ /** @export */
+ invoke_iiiiii,
+ /** @export */
+ invoke_iiiiiii,
+ /** @export */
+ invoke_iiiiiiii,
+ /** @export */
+ invoke_iiiiiiiii,
+ /** @export */
+ invoke_iiiiiiiiiii,
+ /** @export */
+ invoke_iiiiiiiiiiii,
+ /** @export */
+ invoke_iiiiiiiiiiiii,
+ /** @export */
+ invoke_iiiiiiijji,
+ /** @export */
+ invoke_iiiiij,
+ /** @export */
+ invoke_iiij,
+ /** @export */
+ invoke_iij,
+ /** @export */
+ invoke_ijiii,
+ /** @export */
+ invoke_jii,
+ /** @export */
+ invoke_jiiii,
+ /** @export */
+ invoke_v,
+ /** @export */
+ invoke_vi,
+ /** @export */
+ invoke_vii,
+ /** @export */
+ invoke_viii,
+ /** @export */
+ invoke_viiii,
+ /** @export */
+ invoke_viiiii,
+ /** @export */
+ invoke_viiiiii,
+ /** @export */
+ invoke_viiiiiii,
+ /** @export */
+ invoke_viiiiiiii,
+ /** @export */
+ invoke_viiiiiiiiii,
+ /** @export */
+ invoke_viiiiiiiiiiiiii,
+ /** @export */
+ invoke_viiiiiiiiiiiiiii,
+ /** @export */
+ invoke_viiij,
+ /** @export */
+ invoke_viij,
+ /** @export */
+ invoke_vij,
+ /** @export */
+ llvm_eh_typeid_for: _llvm_eh_typeid_for
+};
+var wasmExports = await createWasm();
+var ___wasm_call_ctors = wasmExports['__wasm_call_ctors']
+var _malloc = Module['_malloc'] = wasmExports['malloc']
+var _ntohs = wasmExports['ntohs']
+var _free = Module['_free'] = wasmExports['free']
+var ___cxa_free_exception = wasmExports['__cxa_free_exception']
+var _js_createSpendKeyData = Module['_js_createSpendKeyData'] = wasmExports['js_createSpendKeyData']
+var _js_createSpendKey = Module['_js_createSpendKey'] = wasmExports['js_createSpendKey']
+var _js_getSpendKey_s1 = Module['_js_getSpendKey_s1'] = wasmExports['js_getSpendKey_s1']
+var _js_getSpendKey_s2 = Module['_js_getSpendKey_s2'] = wasmExports['js_getSpendKey_s2']
+var _js_getSpendKey_r = Module['_js_getSpendKey_r'] = wasmExports['js_getSpendKey_r']
+var _js_getSpendKey_s1_hex = Module['_js_getSpendKey_s1_hex'] = wasmExports['js_getSpendKey_s1_hex']
+var _js_getSpendKey_s2_hex = Module['_js_getSpendKey_s2_hex'] = wasmExports['js_getSpendKey_s2_hex']
+var _js_getSpendKey_r_hex = Module['_js_getSpendKey_r_hex'] = wasmExports['js_getSpendKey_r_hex']
+var _js_createFullViewKey = Module['_js_createFullViewKey'] = wasmExports['js_createFullViewKey']
+var _js_createIncomingViewKey = Module['_js_createIncomingViewKey'] = wasmExports['js_createIncomingViewKey']
+var _js_getAddress = Module['_js_getAddress'] = wasmExports['js_getAddress']
+var _js_encodeAddress = Module['_js_encodeAddress'] = wasmExports['js_encodeAddress']
+var _js_isValidSparkAddress = Module['_js_isValidSparkAddress'] = wasmExports['js_isValidSparkAddress']
+var _js_decodeAddress = Module['_js_decodeAddress'] = wasmExports['js_decodeAddress']
+var _js_createMintedCoinData = Module['_js_createMintedCoinData'] = wasmExports['js_createMintedCoinData']
+var _js_createSparkMintRecipients = Module['_js_createSparkMintRecipients'] = wasmExports['js_createSparkMintRecipients']
+var _js_getRecipientVectorLength = Module['_js_getRecipientVectorLength'] = wasmExports['js_getRecipientVectorLength']
+var _js_getRecipientAt = Module['_js_getRecipientAt'] = wasmExports['js_getRecipientAt']
+var _js_getRecipientScriptPubKey = Module['_js_getRecipientScriptPubKey'] = wasmExports['js_getRecipientScriptPubKey']
+var _js_getRecipientScriptPubKeySize = Module['_js_getRecipientScriptPubKeySize'] = wasmExports['js_getRecipientScriptPubKeySize']
+var _js_getRecipientAmount = Module['_js_getRecipientAmount'] = wasmExports['js_getRecipientAmount']
+var _js_getRecipientSubtractFeeFromAmountFlag = Module['_js_getRecipientSubtractFeeFromAmountFlag'] = wasmExports['js_getRecipientSubtractFeeFromAmountFlag']
+var _js_deserializeCoin = Module['_js_deserializeCoin'] = wasmExports['js_deserializeCoin']
+var _js_getCoinFromMeta = Module['_js_getCoinFromMeta'] = wasmExports['js_getCoinFromMeta']
+var _js_getMetadata = Module['_js_getMetadata'] = wasmExports['js_getMetadata']
+var _js_getInputData = Module['_js_getInputData'] = wasmExports['js_getInputData']
+var _js_getInputDataWithMeta = Module['_js_getInputDataWithMeta'] = wasmExports['js_getInputDataWithMeta']
+var _js_identifyCoin = Module['_js_identifyCoin'] = wasmExports['js_identifyCoin']
+var _js_getIdentifiedCoinDiversifier = Module['_js_getIdentifiedCoinDiversifier'] = wasmExports['js_getIdentifiedCoinDiversifier']
+var _js_getIdentifiedCoinValue = Module['_js_getIdentifiedCoinValue'] = wasmExports['js_getIdentifiedCoinValue']
+var _js_getIdentifiedCoinMemo = Module['_js_getIdentifiedCoinMemo'] = wasmExports['js_getIdentifiedCoinMemo']
+var _js_getCSparkMintMetaHeight = Module['_js_getCSparkMintMetaHeight'] = wasmExports['js_getCSparkMintMetaHeight']
+var _js_getCSparkMintMetaId = Module['_js_getCSparkMintMetaId'] = wasmExports['js_getCSparkMintMetaId']
+var _js_getCSparkMintMetaIsUsed = Module['_js_getCSparkMintMetaIsUsed'] = wasmExports['js_getCSparkMintMetaIsUsed']
+var _js_getCSparkMintMetaMemo = Module['_js_getCSparkMintMetaMemo'] = wasmExports['js_getCSparkMintMetaMemo']
+var _js_getCSparkMintMetaNonce = Module['_js_getCSparkMintMetaNonce'] = wasmExports['js_getCSparkMintMetaNonce']
+var _js_getCSparkMintMetaDiversifier = Module['_js_getCSparkMintMetaDiversifier'] = wasmExports['js_getCSparkMintMetaDiversifier']
+var _js_getCSparkMintMetaValue = Module['_js_getCSparkMintMetaValue'] = wasmExports['js_getCSparkMintMetaValue']
+var _js_getCSparkMintMetaType = Module['_js_getCSparkMintMetaType'] = wasmExports['js_getCSparkMintMetaType']
+var _js_getCSparkMintMetaCoin = Module['_js_getCSparkMintMetaCoin'] = wasmExports['js_getCSparkMintMetaCoin']
+var _js_setCSparkMintMetaId = Module['_js_setCSparkMintMetaId'] = wasmExports['js_setCSparkMintMetaId']
+var _js_setCSparkMintMetaHeight = Module['_js_setCSparkMintMetaHeight'] = wasmExports['js_setCSparkMintMetaHeight']
+var _js_getCoinHash = Module['_js_getCoinHash'] = wasmExports['js_getCoinHash']
+var _js_getInputCoinDataCoverSetId = Module['_js_getInputCoinDataCoverSetId'] = wasmExports['js_getInputCoinDataCoverSetId']
+var _js_getInputCoinDataIndex = Module['_js_getInputCoinDataIndex'] = wasmExports['js_getInputCoinDataIndex']
+var _js_getInputCoinDataValue = Module['_js_getInputCoinDataValue'] = wasmExports['js_getInputCoinDataValue']
+var _js_getInputCoinDataTag_hex = Module['_js_getInputCoinDataTag_hex'] = wasmExports['js_getInputCoinDataTag_hex']
+var _js_getInputCoinDataTag_base64 = Module['_js_getInputCoinDataTag_base64'] = wasmExports['js_getInputCoinDataTag_base64']
+var _js_createRecipientsVectorForCreateSparkSpendTransaction = Module['_js_createRecipientsVectorForCreateSparkSpendTransaction'] = wasmExports['js_createRecipientsVectorForCreateSparkSpendTransaction']
+var _js_addRecipientForCreateSparkSpendTransaction = Module['_js_addRecipientForCreateSparkSpendTransaction'] = wasmExports['js_addRecipientForCreateSparkSpendTransaction']
+var _js_createPrivateRecipientsVectorForCreateSparkSpendTransaction = Module['_js_createPrivateRecipientsVectorForCreateSparkSpendTransaction'] = wasmExports['js_createPrivateRecipientsVectorForCreateSparkSpendTransaction']
+var _js_addPrivateRecipientForCreateSparkSpendTransaction = Module['_js_addPrivateRecipientForCreateSparkSpendTransaction'] = wasmExports['js_addPrivateRecipientForCreateSparkSpendTransaction']
+var _js_createCoinsListForCreateSparkSpendTransaction = Module['_js_createCoinsListForCreateSparkSpendTransaction'] = wasmExports['js_createCoinsListForCreateSparkSpendTransaction']
+var _js_addCoinToListForCreateSparkSpendTransaction = Module['_js_addCoinToListForCreateSparkSpendTransaction'] = wasmExports['js_addCoinToListForCreateSparkSpendTransaction']
+var _js_createCoverSetData = Module['_js_createCoverSetData'] = wasmExports['js_createCoverSetData']
+var _js_addCoinToCoverSetData = Module['_js_addCoinToCoverSetData'] = wasmExports['js_addCoinToCoverSetData']
+var _js_createCoverSetDataMapForCreateSparkSpendTransaction = Module['_js_createCoverSetDataMapForCreateSparkSpendTransaction'] = wasmExports['js_createCoverSetDataMapForCreateSparkSpendTransaction']
+var _js_addCoverSetDataForCreateSparkSpendTransaction = Module['_js_addCoverSetDataForCreateSparkSpendTransaction'] = wasmExports['js_addCoverSetDataForCreateSparkSpendTransaction']
+var _js_moveAddCoverSetDataForCreateSparkSpendTransaction = Module['_js_moveAddCoverSetDataForCreateSparkSpendTransaction'] = wasmExports['js_moveAddCoverSetDataForCreateSparkSpendTransaction']
+var _js_createIdAndBlockHashesMapForCreateSparkSpendTransaction = Module['_js_createIdAndBlockHashesMapForCreateSparkSpendTransaction'] = wasmExports['js_createIdAndBlockHashesMapForCreateSparkSpendTransaction']
+var _js_addIdAndBlockHashForCreateSparkSpendTransaction = Module['_js_addIdAndBlockHashForCreateSparkSpendTransaction'] = wasmExports['js_addIdAndBlockHashForCreateSparkSpendTransaction']
+var _js_createSparkSpendTransaction = Module['_js_createSparkSpendTransaction'] = wasmExports['js_createSparkSpendTransaction']
+var _js_getCreateSparkSpendTxResultSerializedSpend = Module['_js_getCreateSparkSpendTxResultSerializedSpend'] = wasmExports['js_getCreateSparkSpendTxResultSerializedSpend']
+var _js_getCreateSparkSpendTxResultSerializedSpendSize = Module['_js_getCreateSparkSpendTxResultSerializedSpendSize'] = wasmExports['js_getCreateSparkSpendTxResultSerializedSpendSize']
+var _js_getCreateSparkSpendTxResultOutputScriptsSize = Module['_js_getCreateSparkSpendTxResultOutputScriptsSize'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptsSize']
+var _js_getCreateSparkSpendTxResultOutputScriptAt = Module['_js_getCreateSparkSpendTxResultOutputScriptAt'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptAt']
+var _js_getCreateSparkSpendTxResultOutputScriptSizeAt = Module['_js_getCreateSparkSpendTxResultOutputScriptSizeAt'] = wasmExports['js_getCreateSparkSpendTxResultOutputScriptSizeAt']
+var _js_getCreateSparkSpendTxResultSpentCoinsSize = Module['_js_getCreateSparkSpendTxResultSpentCoinsSize'] = wasmExports['js_getCreateSparkSpendTxResultSpentCoinsSize']
+var _js_getCreateSparkSpendTxResultSpentCoinAt = Module['_js_getCreateSparkSpendTxResultSpentCoinAt'] = wasmExports['js_getCreateSparkSpendTxResultSpentCoinAt']
+var _js_getCreateSparkSpendTxResultFee = Module['_js_getCreateSparkSpendTxResultFee'] = wasmExports['js_getCreateSparkSpendTxResultFee']
+var _js_freeSpendKeyData = Module['_js_freeSpendKeyData'] = wasmExports['js_freeSpendKeyData']
+var _js_freeSpendKey = Module['_js_freeSpendKey'] = wasmExports['js_freeSpendKey']
+var _js_freeFullViewKey = Module['_js_freeFullViewKey'] = wasmExports['js_freeFullViewKey']
+var _js_freeIncomingViewKey = Module['_js_freeIncomingViewKey'] = wasmExports['js_freeIncomingViewKey']
+var _js_freeAddress = Module['_js_freeAddress'] = wasmExports['js_freeAddress']
+var _js_freeRecipientVector = Module['_js_freeRecipientVector'] = wasmExports['js_freeRecipientVector']
+var _js_freeCSparkMintMeta = Module['_js_freeCSparkMintMeta'] = wasmExports['js_freeCSparkMintMeta']
+var _js_freeInputCoinData = Module['_js_freeInputCoinData'] = wasmExports['js_freeInputCoinData']
+var _js_freeIdentifiedCoinData = Module['_js_freeIdentifiedCoinData'] = wasmExports['js_freeIdentifiedCoinData']
+var _js_freeCoin = Module['_js_freeCoin'] = wasmExports['js_freeCoin']
+var _js_freeSparkSpendRecipientsVector = Module['_js_freeSparkSpendRecipientsVector'] = wasmExports['js_freeSparkSpendRecipientsVector']
+var _js_freeSparkSpendPrivateRecipientsVector = Module['_js_freeSparkSpendPrivateRecipientsVector'] = wasmExports['js_freeSparkSpendPrivateRecipientsVector']
+var _js_freeSparkSpendCoinsList = Module['_js_freeSparkSpendCoinsList'] = wasmExports['js_freeSparkSpendCoinsList']
+var _js_freeCoverSetData = Module['_js_freeCoverSetData'] = wasmExports['js_freeCoverSetData']
+var _js_freeCoverSetDataMapForCreateSparkSpendTransaction = Module['_js_freeCoverSetDataMapForCreateSparkSpendTransaction'] = wasmExports['js_freeCoverSetDataMapForCreateSparkSpendTransaction']
+var _js_freeIdAndBlockHashesMap = Module['_js_freeIdAndBlockHashesMap'] = wasmExports['js_freeIdAndBlockHashesMap']
+var _js_freeCreateSparkSpendTxResult = Module['_js_freeCreateSparkSpendTxResult'] = wasmExports['js_freeCreateSparkSpendTxResult']
+var _htonl = wasmExports['htonl']
+var _htons = wasmExports['htons']
+var _emscripten_builtin_memalign = wasmExports['emscripten_builtin_memalign']
+var _setThrew = wasmExports['setThrew']
+var __emscripten_tempret_set = wasmExports['_emscripten_tempret_set']
+var __emscripten_stack_restore = wasmExports['_emscripten_stack_restore']
+var __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc']
+var _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current']
+var ___cxa_decrement_exception_refcount = wasmExports['__cxa_decrement_exception_refcount']
+var ___cxa_increment_exception_refcount = wasmExports['__cxa_increment_exception_refcount']
+var ___cxa_can_catch = wasmExports['__cxa_can_catch']
+var ___cxa_get_exception_ptr = wasmExports['__cxa_get_exception_ptr']
+
+function invoke_vii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_ii(index,a1) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_i(index) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)();
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiii(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_vi(index,a1) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iij(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiii(index,a1,a2,a3,a4,a5,a6,a7) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_v(index) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)();
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viij(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_jii(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ return 0n;
+ }
+}
+
+function invoke_viiiii(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_ijiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiij(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_vij(index,a1,a2) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiii(index,a1,a2,a3,a4,a5,a6) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiijji(index,a1,a2,a3,a4,a5,a6,a7,a8,a9) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiii(index,a1,a2,a3,a4,a5,a6) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiij(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiij(index,a1,a2,a3,a4,a5) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiii(index,a1,a2,a3,a4,a5,a6,a7) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_jiiii(index,a1,a2,a3,a4) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ return 0n;
+ }
+}
+
+function invoke_iiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_fiii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_diii(index,a1,a2,a3) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_iiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11) {
+ var sp = stackSave();
+ try {
+ return getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+function invoke_viiiiiiiiiiiiiii(index,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15) {
+ var sp = stackSave();
+ try {
+ getWasmTableEntry(index)(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15);
+ } catch(e) {
+ stackRestore(sp);
+ if (e !== e+0) throw e;
+ _setThrew(1, 0);
+ }
+}
+
+
+// include: postamble.js
+// === Auto-generated postamble setup entry stuff ===
+
+Module['ccall'] = ccall;
+Module['cwrap'] = cwrap;
+
+
+function run() {
+
+ if (runDependencies > 0) {
+ dependenciesFulfilled = run;
+ return;
+ }
+
+ preRun();
+
+ // a preRun added a dependency, run will be called later
+ if (runDependencies > 0) {
+ dependenciesFulfilled = run;
+ return;
+ }
+
+ function doRun() {
+ // run may have just been called through dependencies being fulfilled just in this very frame,
+ // or while the async setStatus time below was happening
+ Module['calledRun'] = true;
+
+ if (ABORT) return;
+
+ initRuntime();
+
+ readyPromiseResolve(Module);
+ Module['onRuntimeInitialized']?.();
+
+ postRun();
+ }
+
+ if (Module['setStatus']) {
+ Module['setStatus']('Running...');
+ setTimeout(() => {
+ setTimeout(() => Module['setStatus'](''), 1);
+ doRun();
+ }, 1);
+ } else
+ {
+ doRun();
+ }
+}
+
+if (Module['preInit']) {
+ if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
+ while (Module['preInit'].length > 0) {
+ Module['preInit'].pop()();
+ }
+}
+
+run();
+
+// end include: postamble.js
+
+// include: postamble_modularize.js
+// In MODULARIZE mode we wrap the generated code in a factory function
+// and return either the Module itself, or a promise of the module.
+//
+// We assign to the `moduleRtn` global here and configure closure to see
+// this as and extern so it won't get minified.
+
+moduleRtn = readyPromise;
+
+// end include: postamble_modularize.js
+
+
+
+ return moduleRtn;
+}
+);
+})();
+export default Module;
diff --git a/packages/extension/src/libs/utils/wasmWorkerModule/spark.wasm b/packages/extension/src/libs/utils/wasmWorkerModule/spark.wasm
new file mode 100644
index 000000000..b45075cec
Binary files /dev/null and b/packages/extension/src/libs/utils/wasmWorkerModule/spark.wasm differ
diff --git a/packages/extension/src/providers/bitcoin/libs/activity-handlers/index.ts b/packages/extension/src/providers/bitcoin/libs/activity-handlers/index.ts
index 52fb5cc0e..a3b3c4201 100644
--- a/packages/extension/src/providers/bitcoin/libs/activity-handlers/index.ts
+++ b/packages/extension/src/providers/bitcoin/libs/activity-handlers/index.ts
@@ -1,3 +1,4 @@
import haskoinHandler from './providers/haskoin';
import ssHandler from './providers/ss';
-export { haskoinHandler, ssHandler };
+import firoHandler from './providers/firo';
+export { haskoinHandler, ssHandler, firoHandler };
diff --git a/packages/extension/src/providers/bitcoin/libs/activity-handlers/providers/firo/index.ts b/packages/extension/src/providers/bitcoin/libs/activity-handlers/providers/firo/index.ts
new file mode 100644
index 000000000..06015a74b
--- /dev/null
+++ b/packages/extension/src/providers/bitcoin/libs/activity-handlers/providers/firo/index.ts
@@ -0,0 +1,107 @@
+import MarketData from '@/libs/market-data';
+import { FiroTxType } from '@/providers/bitcoin/types';
+import { Activity, ActivityStatus, ActivityType } from '@/types/activity';
+import { BaseNetwork } from '@/types/base-network';
+
+export default async (
+ network: BaseNetwork,
+ pubkey: string,
+): Promise => {
+ return fetch(
+ `${network.node}/insight-api-zcoin/txs?address=${network.displayAddress(
+ pubkey,
+ )}&pageSize=40`,
+ )
+ .then(res => res.json())
+ .then(async (txs: { txs: FiroTxType[] }) => {
+ if ((txs as any).message) return [];
+ let tokenPrice = '0';
+ if (network.coingeckoID) {
+ const marketData = new MarketData();
+ await marketData
+ .getTokenPrice(network.coingeckoID)
+ .then(mdata => (tokenPrice = mdata || '0'));
+ }
+
+ const address = network.displayAddress(pubkey);
+
+ const cleanedTxs = txs.txs.map(tx => {
+ return {
+ ...tx,
+ vin: tx.vin.filter(vi => vi.addr),
+ vout: tx.vout.filter(vo => vo.scriptPubKey.addresses),
+ };
+ });
+
+ return cleanedTxs.map(tx => {
+ const isIncoming = !tx.vin.find(i => i.addr === address);
+
+ let toAddress = '';
+ let value = 0;
+
+ if (isIncoming) {
+ const relevantOut = tx.vout.find(
+ tx => tx.scriptPubKey.addresses![0] === address,
+ );
+ if (relevantOut) {
+ toAddress = relevantOut.scriptPubKey.addresses![0];
+ value = Number(relevantOut.value);
+ }
+ } else {
+ const relevantOut = tx.vout.find(
+ tx => tx.scriptPubKey.addresses![0] !== address,
+ );
+ if (relevantOut) {
+ toAddress = relevantOut.scriptPubKey.addresses![0];
+ value = Number(relevantOut.value);
+ } else {
+ toAddress = tx.vout[0].scriptPubKey.addresses![0];
+ value = Number(tx.vout[0].value);
+ }
+ }
+
+ const act: Activity = {
+ from: tx.vin?.[0]?.addr,
+ isIncoming,
+ network: network.name,
+ status:
+ tx.blockheight > 0
+ ? ActivityStatus.success
+ : ActivityStatus.pending,
+ timestamp: Number(tx.time) * 1000,
+ to: toAddress,
+ token: {
+ decimals: network.decimals,
+ icon: network.icon,
+ name: network.name_long,
+ symbol: network.currencyName,
+ coingeckoID: network.coingeckoID,
+ price: tokenPrice,
+ },
+ transactionHash: tx.txid,
+ type: ActivityType.transaction,
+ value: (+value * 100000000).toString(),
+ rawInfo: {
+ blockNumber: tx.blockheight,
+ fee: Number(tx?.fees),
+ inputs: tx.vin.map(input => ({
+ address: input.addr,
+ value: Number(input.value),
+ })),
+ outputs: tx.vout.map(output => ({
+ address: output.scriptPubKey.addresses![0],
+ value: Number(output.value),
+ pkscript: output.scriptPubKey.hex,
+ })),
+ transactionHash: tx.txid,
+ timestamp: Number(tx.time) * 1000,
+ },
+ };
+ return act;
+ });
+ })
+ .catch(error => {
+ console.log({ error });
+ return [];
+ });
+};
diff --git a/packages/extension/src/providers/bitcoin/libs/api-firo.ts b/packages/extension/src/providers/bitcoin/libs/api-firo.ts
new file mode 100644
index 000000000..ef13c9e90
--- /dev/null
+++ b/packages/extension/src/providers/bitcoin/libs/api-firo.ts
@@ -0,0 +1,113 @@
+import { BTCRawInfo } from '@/types/activity';
+import { ProviderAPIInterface } from '@/types/provider';
+import { BitcoinNetworkInfo, FiroTxType } from '../types';
+import { getAddress as getBitcoinAddress } from '../types/bitcoin-network';
+import { PublicFiroWallet } from './firo-wallet/public-firo-wallet';
+import { UnspentTxOutputModel } from '@/providers/bitcoin/libs/electrum-client/abstract-electrum';
+
+class API implements ProviderAPIInterface {
+ node: string;
+ networkInfo: BitcoinNetworkInfo;
+ #wallet: PublicFiroWallet;
+
+ constructor(node: string, networkInfo: BitcoinNetworkInfo) {
+ this.node = node;
+ this.networkInfo = networkInfo;
+ this.#wallet = new PublicFiroWallet();
+ }
+
+ public get api() {
+ return this;
+ }
+
+ private getAddress(pubkey: string) {
+ return getBitcoinAddress(pubkey, this.networkInfo);
+ }
+
+ async init(): Promise {}
+
+ async getRawTransaction(hash: string): Promise {
+ return fetch(`${this.node}/insight-api-zcoin/rawtx/${hash}`)
+ .then(res => res.json())
+ .then((tx: { hex: string; error: unknown }) => {
+ if ((tx as any).error) return null;
+ if (!tx.hex) return null;
+ return `0x${tx.hex}`;
+ });
+ }
+ async getTransactionStatus(hash: string): Promise {
+ return fetch(`${this.node}/insight-api-zcoin/tx/${hash}`)
+ .then(res => res.json())
+ .then((tx: FiroTxType) => {
+ if ((tx as any).message) return null;
+ if (tx.blockheight < 0) return null;
+ const rawInfo: BTCRawInfo = {
+ blockNumber: tx.blockheight,
+ fee: Number(tx.fees),
+ inputs: tx.vin
+ .filter(t => t.addresses && t.addresses.length)
+ .map(input => ({
+ address: input.addresses![0],
+ value: Number(input.value),
+ })),
+ outputs: tx.vout
+ .filter(
+ t => t.scriptPubKey.addresses && t.scriptPubKey.addresses.length,
+ )
+ .map(output => ({
+ address: output.scriptPubKey.addresses![0],
+ value: Number(output.value),
+ pkscript: output.scriptPubKey.hex,
+ })),
+ transactionHash: tx.txid,
+ timestamp: tx.time * 1000,
+ };
+ return rawInfo;
+ });
+ }
+
+ async getBalance(pubkey: string): Promise {
+ return this.#wallet
+ .getPublicBalance()
+ .then(balance => balance.toString())
+ .catch(() => '0');
+ }
+
+ async broadcastTxRPC(rawtx: string): Promise<{ txid: string }> {
+ return fetch(`${this.node}/insight-api-zcoin/tx/send`, {
+ method: 'POST',
+ headers: {
+ Accept: 'application/json',
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({ rawtx }),
+ })
+ .then(res => res.json())
+ .then(response => {
+ if (response.error) {
+ return Promise.reject(response.message);
+ }
+ return response;
+ });
+ }
+
+ async broadcastTx(txHex: string): Promise<{ txid: string }> {
+ const txid = await this.#wallet.broadcastTransaction(txHex);
+ return { txid };
+ }
+
+ async getUTXOs(pubkey: string): Promise {
+ const spendableUtxos = await this.#wallet.getOnlySpendableUtxos();
+
+ if ((spendableUtxos as any).message || !spendableUtxos.length) return [];
+ spendableUtxos.sort((a, b) => {
+ return a.amount - b.amount;
+ });
+
+ return spendableUtxos.map(el => ({
+ ...el,
+ value: el.satoshis,
+ }));
+ }
+}
+export default API;
diff --git a/packages/extension/src/providers/bitcoin/libs/api.ts b/packages/extension/src/providers/bitcoin/libs/api.ts
index ed4c562b8..623e35eb7 100644
--- a/packages/extension/src/providers/bitcoin/libs/api.ts
+++ b/packages/extension/src/providers/bitcoin/libs/api.ts
@@ -1,12 +1,12 @@
import { BTCRawInfo } from '@/types/activity';
import { ProviderAPIInterface } from '@/types/provider';
+import { toBN } from 'web3-utils';
import {
BitcoinNetworkInfo,
HaskoinBalanceType,
HaskoinTxType,
HaskoinUnspentType,
} from '../types';
-import { toBN } from 'web3-utils';
import { getAddress as getBitcoinAddress } from '../types/bitcoin-network';
import { filterOutOrdinals } from './filter-ordinals';
diff --git a/packages/extension/src/providers/bitcoin/libs/electrum-client/abstract-electrum.ts b/packages/extension/src/providers/bitcoin/libs/electrum-client/abstract-electrum.ts
new file mode 100644
index 000000000..35465e82e
--- /dev/null
+++ b/packages/extension/src/providers/bitcoin/libs/electrum-client/abstract-electrum.ts
@@ -0,0 +1,83 @@
+export class TransactionModel {
+ tx_hash: string = '';
+ height: number = 0;
+ tx_pos: number = 0;
+ value: number = 0;
+}
+
+export type UnspentTxOutputModel = {
+ vout: number;
+ raw: string;
+ height: number;
+ amount: number;
+ satoshis: number;
+ txid: string;
+ confirmations: number;
+ address: string;
+ scriptPubKey: string;
+};
+
+export type VIn = {
+ address: string;
+ addresses: Array;
+ txid: string;
+ value: number;
+ vout: number;
+ nFees: number;
+};
+
+export type VOut = {
+ addresses: Array;
+ value: number;
+ scriptPubKey: ScriptPubKey;
+};
+
+export type ScriptPubKey = {
+ addresses: Array;
+ type: string;
+};
+
+export class FullTransactionModel {
+ address: string = '';
+ blockHash: number = 0;
+ blockTime: number = 0;
+ confirmation: number = 0;
+ hash: string = '';
+ height: number = 0;
+ hex: string = '';
+ vin: Array = [];
+ vout: Array = [];
+ inputs: Array = [];
+ outputs: Array = [];
+ size: number = 0;
+ time: number = 0;
+ txid: string = '';
+ type: number = 0;
+ version: number = 0;
+ confirmations: number = 0;
+}
+
+export type AnonymitySetModel = {
+ coins: string[][];
+ blockHash: string;
+ setHash: string;
+};
+
+export type AnonymitySetMetaModel = {
+ size: number;
+ blockHash: string;
+ setHash: string;
+};
+
+export type UsedSerialsModel = {
+ serials: string[];
+};
+
+export type MyCoinModel = {
+ setId: number;
+ setHash: string;
+ coin: string[];
+ tag: string;
+ value: bigint;
+ isUsed: boolean;
+};
diff --git a/packages/extension/src/providers/bitcoin/libs/electrum-client/electrum-client.ts b/packages/extension/src/providers/bitcoin/libs/electrum-client/electrum-client.ts
new file mode 100644
index 000000000..c3cb4fabb
--- /dev/null
+++ b/packages/extension/src/providers/bitcoin/libs/electrum-client/electrum-client.ts
@@ -0,0 +1,450 @@
+import ElectrumClient from 'electrum-client-browser';
+import {
+ AnonymitySetMetaModel,
+ FullTransactionModel,
+ TransactionModel,
+ UnspentTxOutputModel,
+ UsedSerialsModel,
+} from './abstract-electrum';
+
+import * as bitcoin from 'bitcoinjs-lib';
+import BigNumber from 'bignumber.js';
+import { SATOSHI } from '@/providers/bitcoin/libs/firo-wallet/firo-wallet';
+
+const networkInfo = {
+ messagePrefix: '\x18Zcoin Signed Message:\n',
+ bech32: 'bc',
+ bip32: {
+ public: 0x0488b21e,
+ private: 0x0488ade4,
+ },
+ pubKeyHash: 0x52,
+ scriptHash: 0x07,
+ wif: 0xd2,
+};
+
+type Peer = {
+ host: string;
+ tcp: string | null;
+ ssl: string | null;
+};
+
+const hardcodedPeers: Peer[] = [
+ { host: 'electrumx.firo.org', tcp: '50001', ssl: '50002' },
+ { host: 'electrumx01.firo.org', tcp: '50001', ssl: '50002' },
+ { host: 'electrumx02.firo.org', tcp: '50001', ssl: '50002' },
+ { host: 'electrumx03.firo.org', tcp: '50001', ssl: '50002' },
+];
+
+/**
+ * Returns random hardcoded electrum server guaranteed to work
+ * at the time of writing.
+ *
+ * @returns {Promise<{tcp, host}|*>}
+ */
+function getRandomHardcodedPeer(): Peer {
+ const index = Math.floor(hardcodedPeers.length * Math.random());
+ return hardcodedPeers[index];
+}
+
+export default class FiroElectrum {
+ mainClient?: ElectrumClient = undefined;
+ wasConnectedAtLeastOnce = false;
+
+ private all_anonymity_sets_meta: AnonymitySetMetaModel[] = [];
+
+ async getCoinIDs(coinHashes: string[]) {
+ return this.mainClient
+ ?.request('spark.getsparkmintmetadata', [
+ {
+ coinHashes: coinHashes,
+ },
+ ])
+ .catch(error => {
+ console.error(
+ 'electrum_wallet:getCoinIDs',
+ 'Failed to fetch coin IDs:',
+ error,
+ );
+ throw new Error(error);
+ });
+ }
+
+ async connectMain() {
+ try {
+ const peer = getRandomHardcodedPeer();
+ this.mainClient = new ElectrumClient(peer.host, 50004, 'wss');
+
+ await this.mainClient.connect('electrum-client-browser', '1.4');
+
+ this.wasConnectedAtLeastOnce = true;
+ } catch (e) {
+ this.mainClient = undefined;
+ console.error('electrum_wallet:connectMain', e);
+ if (!this.mainClient) {
+ console.warn('electrum_wallet:connectMain', 'retry');
+ setTimeout(() => {
+ this.connectMain();
+ }, 5000);
+ }
+ }
+ }
+
+ async disconnect() {
+ try {
+ await this.mainClient?.close();
+
+ this.wasConnectedAtLeastOnce = false;
+ } catch (e) {
+ console.error('electrum_wallet:disconnect', e);
+ }
+ }
+
+ async getBalanceByAddress(address: string): Promise {
+ const script = bitcoin.address.toOutputScript(address, networkInfo);
+ const hash = bitcoin.crypto.sha256(script);
+
+ const reversedHash = Buffer.from(hash.reverse());
+ return await this.mainClient!.blockchain_scripthash_getBalance(
+ reversedHash.toString('hex'),
+ );
+ }
+
+ async getUsedCoinsTags(startPoint = 0): Promise<{ tags: string[] }> {
+ console.log(
+ 'electrum_wallet:getUsedCoinsTags',
+ 'startPoint',
+ startPoint.toString(),
+ );
+ return await this.mainClient!.request('spark.getusedcoinstags', [
+ startPoint.toString(),
+ ]);
+ }
+
+ async getUsedCoinsTagsTxHashes(
+ startNumber: number,
+ ): Promise<{ tagsandtxids: string[] }> {
+ return await this.mainClient!.request('spark.getusedcoinstagstxhashes', [
+ startNumber.toString(),
+ ]);
+ }
+
+ async getTransactionsByAddress(
+ address: string,
+ ): Promise> {
+ const script = this.addressToScript(address);
+ const hash = bitcoin.crypto.sha256(Buffer.from(script));
+
+ const reversedHash = Buffer.from(hash.reverse());
+ const history = await this.mainClient?.blockchain_scripthash_getHistory(
+ reversedHash.toString('hex'),
+ );
+ return history as TransactionModel[];
+ }
+
+ async getTransactionsFullByAddress(
+ address: string,
+ ): Promise> {
+ const txs = await this.getTransactionsByAddress(address);
+ const ret = [];
+ for (const tx of txs) {
+ const full = await this.mainClient?.blockchain_transaction_get(
+ tx.tx_hash,
+ true,
+ );
+ full.address = address;
+ for (const input of full.vin) {
+ // now we need to fetch previous TX where this VIN became an output, so we can see its amount
+ const prevTxForVin = await this.mainClient?.blockchain_transaction_get(
+ input.txid,
+ true,
+ );
+ if (
+ prevTxForVin &&
+ prevTxForVin.vout &&
+ prevTxForVin.vout[input.vout]
+ ) {
+ input.value = prevTxForVin.vout[input.vout].value;
+ // also, we extract destination address from prev output:
+ if (
+ prevTxForVin.vout[input.vout].scriptPubKey &&
+ prevTxForVin.vout[input.vout].scriptPubKey.addresses
+ ) {
+ input.addresses =
+ prevTxForVin.vout[input.vout].scriptPubKey.addresses;
+ }
+ }
+ }
+
+ for (const output of full.vout) {
+ if (output.scriptPubKey && output.scriptPubKey.addresses) {
+ output.addresses = output.scriptPubKey.addresses;
+ }
+ }
+ full.inputs = full.vin;
+ full.outputs = full.vout;
+ // delete full.vin;
+ // delete full.vout;
+ // delete full.hex; // compact
+ // delete full.hash; // compact
+ ret.push(full);
+ }
+ return ret;
+ }
+
+ // async multiGetHistoryByAddress(
+ // addresses: Array,
+ // batchsize: number = 100,
+ // ): Promise
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
![]()
-
-
![]()
+
+
{{ account.name }}
+
{{ network.displayAddress(account.address) }}
+
-
-
{{ account.name }}
-
{{ network.displayAddress(account.address) }}
+
+ copy
+
+
+
+
-
- copy
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ sparkAccount?.defaultAddress }}
+
+
+
+
+ copy
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
![]()
+
+
+
{{ account.name }}
+
{{ network.displayAddress(account.address) }}
+
+
+
+ copy
+
+
+
+
diff --git a/packages/extension/src/ui/action/views/verify-send-to-spark-transaction/index.vue b/packages/extension/src/ui/action/views/verify-send-to-spark-transaction/index.vue
new file mode 100644
index 000000000..eabb1560b
--- /dev/null
+++ b/packages/extension/src/ui/action/views/verify-send-to-spark-transaction/index.vue
@@ -0,0 +1,465 @@
+
+
+
+
+
+
+
+ Double check the information and confirm transaction
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packages/extension/src/ui/action/workers/sparkCoinInfoWorker.ts b/packages/extension/src/ui/action/workers/sparkCoinInfoWorker.ts
new file mode 100644
index 000000000..a445e87d7
--- /dev/null
+++ b/packages/extension/src/ui/action/workers/sparkCoinInfoWorker.ts
@@ -0,0 +1,181 @@
+import {
+ getSparkCoinInfo,
+ SparkCoinValue,
+} from '@/libs/spark-handler/getSparkCoinInfo.ts';
+import { DB_DATA_KEYS, IndexedDBHelper } from '@action/db/indexedDB.ts';
+import { wasmWorkerInstance } from '@/libs/utils/wasm-worker-loader.ts';
+import {
+ getIncomingViewKey,
+ getSpendKeyObj,
+} from '@/libs/spark-handler/generateSparkWallet.ts';
+import {
+ AnonymitySetModel,
+ MyCoinModel,
+} from '@/providers/bitcoin/libs/electrum-client/abstract-electrum';
+import { differenceSets } from '@action/utils/set-utils.ts';
+
+const db = new IndexedDBHelper();
+
+export interface CheckedCoinData {
+ coin: SparkCoinValue;
+ setId: number;
+ tag: string;
+ setHash: string;
+}
+
+export interface OwnedCoinData {
+ coin: string[];
+ setId: number;
+ isUsed: boolean;
+ setHash: string;
+ tag: string;
+ value: bigint;
+}
+
+const removeDuplicates = (coinsResult: Set
): Set => {
+ const arr = Array.from(coinsResult);
+ const seen = new Set();
+ const deduped = [];
+
+ for (let i = arr.length - 1; i >= 0; i--) {
+ const item = arr[i];
+ const key = `${item.tag}:${item.coin.join('|')}`;
+
+ if (!seen.has(key)) {
+ seen.add(key);
+ deduped.push(item);
+ }
+ }
+
+ return new Set(deduped.reverse());
+};
+
+async function fetchAllCoinInfos(
+ firstCoinSetId: number,
+ myCoinsMap: Map,
+ allSets: AnonymitySetModel[],
+ fullViewKeyObj: number,
+ incomingViewKeyObj: number,
+ Module: WasmModule,
+) {
+ try {
+ const allPromises: Promise[] = [];
+ const finalResult: CheckedCoinData[] = [];
+
+ allSets.slice(firstCoinSetId).forEach((set, index) => {
+ set.coins.forEach(coin => {
+ const setCoin = `${coin.join()}${set.setHash}`;
+
+ if (!myCoinsMap.has(setCoin)) {
+ const promise = getSparkCoinInfo({
+ coin,
+ fullViewKeyObj,
+ incomingViewKeyObj,
+ wasmModule: Module,
+ }).then(async res => {
+ finalResult.push({
+ coin: res,
+ setId: index + 1,
+ tag: res.tag,
+ setHash: set.setHash,
+ });
+ // TODO: initially DB_DATA_KEYS.myCoins is undefined in loop , better to append one by one
+ // const oldCoins = await db.readData(
+ // DB_DATA_KEYS.myCoins,
+ // );
+ // console.log({ oldCoins });
+ // await db.saveData(DB_DATA_KEYS.myCoins, [
+ // ...(oldCoins ?? []),
+ // {
+ // coin: res.originalCoin,
+ // setId: index + 1,
+ // setHash: set.setHash,
+ // value: res.value,
+ // tag: res.tag,
+ // isUsed: false,
+ // },
+ // ]);
+ return res;
+ });
+
+ allPromises.push(promise);
+ }
+ });
+ });
+
+ await Promise.allSettled(allPromises);
+ const myCoins = finalResult.map(coinData => ({
+ coin: coinData.coin.originalCoin,
+ setId: coinData.setId,
+ value: coinData.coin.value,
+ tag: coinData.tag,
+ setHash: coinData.setHash,
+ isUsed: false,
+ }));
+
+ const savedMyCoins = (await db.readData(DB_DATA_KEYS.myCoins)) || [];
+ const updatedMyCoinsSet = differenceSets(
+ new Set(savedMyCoins),
+ new Set(myCoins),
+ );
+ const dedupedMyCoinsSet = removeDuplicates(updatedMyCoinsSet);
+ await db.saveData(DB_DATA_KEYS.myCoins, Array.from(dedupedMyCoinsSet));
+
+ return Array.from(updatedMyCoinsSet);
+ } catch (err) {
+ console.error(err);
+ }
+}
+
+addEventListener('message', async () => {
+ const Module = await wasmWorkerInstance.getInstance();
+
+ const spendKeyObj = await getSpendKeyObj(Module);
+
+ if (!spendKeyObj || spendKeyObj === 0) {
+ throw new Error('Failed to create spendKeyObj');
+ }
+
+ const incomingViewKey = await getIncomingViewKey(Module, spendKeyObj);
+
+ if (!incomingViewKey) {
+ throw new Error('Failed to create IncomingViewKey');
+ }
+
+ const { incomingViewKeyObj, fullViewKeyObj } = incomingViewKey;
+
+ if (!incomingViewKeyObj || incomingViewKeyObj === 0 || fullViewKeyObj === 0) {
+ throw new Error('Failed to create IncomingViewKey and fullViewKeyObj');
+ }
+
+ const allSets = await db.readData(DB_DATA_KEYS.sets);
+ const myCoins = await db.readData(DB_DATA_KEYS.myCoins);
+
+ const myCoinsMap = new Map();
+ (myCoins ?? []).forEach(coin => {
+ myCoinsMap.set(`${coin.coin.join()}${coin.setHash}`, coin);
+ });
+
+ const lastCoinSetId = (myCoins ?? []).at(-1)?.setId ?? 0;
+
+ const firstCoinSetId = lastCoinSetId - 1 < 1 ? 0 : lastCoinSetId - 1;
+
+ const result = await fetchAllCoinInfos(
+ firstCoinSetId,
+ myCoinsMap,
+ allSets,
+ fullViewKeyObj,
+ incomingViewKeyObj,
+ Module,
+ );
+
+ Module.ccall(
+ 'js_freeIncomingViewKey',
+ null,
+ ['number'],
+ [incomingViewKeyObj],
+ );
+ Module.ccall('js_freeFullViewKey', null, ['number'], [fullViewKeyObj]);
+
+ postMessage(result);
+});
diff --git a/packages/extension/src/ui/onboard/App.vue b/packages/extension/src/ui/onboard/App.vue
index b9b15f7c6..142f2350e 100644
--- a/packages/extension/src/ui/onboard/App.vue
+++ b/packages/extension/src/ui/onboard/App.vue
@@ -8,10 +8,13 @@
-
+
Pin the Enkrypt extension
Click on in your browser
@@ -72,7 +75,8 @@ const wrapClassObject = () => {