diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 70ec96b9db1..7ea3c1bfbae 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -23,6 +23,7 @@ on: env: CacheFolders: | communication + network common desktop desktop-package diff --git a/README.md b/README.md index 4dd9d922179..a7fe1b0a4d5 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,17 @@ If you want to interact with Huly programmatically, check out our [API Client](. You can find API usage examples in the [Huly examples](https://github.com/hcengineering/huly-examples) repository. +## Huly Virtual Network + +The platform features a distributed network architecture that enables scalable, fault-tolerant communication between accounts, workspaces, and nodes. The [Huly Virtual Network](./network/README.md) provides: + +- **Distributed Load Balancing**: Intelligent routing across multiple nodes using consistent hashing +- **Multi-Tenant Architecture**: Secure workspace isolation with role-based access control +- **Fault Tolerance**: Automatic failover and recovery mechanisms +- **Real-time Communication**: Event-driven architecture with broadcast capabilities + +For detailed information about the network architecture, deployment, and API reference, see the [Network Documentation](./network/README.md). + ## Table of Contents - [Huly Platform](#huly-platform) @@ -35,6 +46,7 @@ You can find API usage examples in the [Huly examples](https://github.com/hcengi - [Self-Hosting](#self-hosting) - [Activity](#activity) - [API Client](#api-client) + - [Huly Virtual Network](#huly-virtual-network) - [Table of Contents](#table-of-contents) - [Pre-requisites](#pre-requisites) - [Verification](#verification) @@ -106,6 +118,7 @@ This project uses GitHub Packages for dependency management. To successfully dow Follow these steps: 1. Generate a GitHub Token: + - Log in to your GitHub account - Go to **Settings** > **Developer settings** > **Personal access tokens** (https://github.com/settings/personal-access-tokens) - Click **Generate new token** @@ -113,13 +126,13 @@ Follow these steps: - Generate the token and copy it 2. Authenticate with npm: + ```bash npm login --registry=https://npm.pkg.github.com ``` When prompted, enter your GitHub username, use the generated token as your password - ## Fast start ```bash @@ -280,6 +293,7 @@ This guide describes the nuances of building and running the application from so #### Disk Space Requirements Ensure you have sufficient disk space available: + - A fully deployed local application in clean Docker will consume slightly more than **35 GB** of WSL virtual disk space - The application folder after build (sources + artifacts) will occupy **4.5 GB** @@ -303,6 +317,7 @@ Make sure Docker is accessible from WSL: Windows Git often automatically replaces line endings. Since most build scripts are `.sh` files, ensure your Windows checkout doesn't break them. **Solution options:** + - Checkout from WSL instead of Windows - Configure Git on Windows to disable auto-replacement: ```bash @@ -343,6 +358,7 @@ After these preparations, the build instructions should work without issues. When starting the application (`rush docker:up`), some network ports in Windows might be occupied. You can fix port mapping in the `\dev\docker-compose.yaml` file. **Important:** Depending on which port you change, you'll need to: + 1. Find what's using that port 2. 
Update the new address in the corresponding service configuration diff --git a/common/config/rush/pnpm-lock.yaml b/common/config/rush/pnpm-lock.yaml index 7ee56d3b07e..99981d55e7c 100644 --- a/common/config/rush/pnpm-lock.yaml +++ b/common/config/rush/pnpm-lock.yaml @@ -835,6 +835,12 @@ importers: '@rush-temp/mongo': specifier: file:./projects/mongo.tgz version: file:projects/mongo.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(gcp-metadata@5.3.0(encoding@0.1.13))(snappy@7.2.2)(socks@2.8.3)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) + '@rush-temp/network': + specifier: file:./projects/network.tgz + version: file:projects/network.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) + '@rush-temp/network-zeromq': + specifier: file:./projects/network-zeromq.tgz + version: file:projects/network-zeromq.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) '@rush-temp/notification': specifier: file:./projects/notification.tgz version: file:projects/notification.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(@types/node@22.15.29)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) @@ -2389,6 +2395,9 @@ importers: yjs: specifier: ^13.6.23 version: 13.6.23 + zeromq: + specifier: ^6.5.0 + version: 6.5.0 zod: specifier: ^3.22.4 version: 3.24.2 @@ -5444,6 +5453,14 @@ packages: resolution: {integrity: sha512-2NrKTsPik2KN7fUtLuhOmZFyaailUgcbTQKLiXEvmcNdiCBYdpopLuCl7wU9l1MffhKvekpCb05WoB4os4ZSdA==, tarball: file:projects/mongo.tgz} version: 0.0.0 + '@rush-temp/network-zeromq@file:projects/network-zeromq.tgz': + resolution: {integrity: sha512-hXbmbcq2y/BwdXvrOeP5hpZYRqssUggoqp4cRGU5rFZRLRbQLDDU9k3AkCVi385ijUkp0EpsaDhzyEwgY4pXTw==, tarball: file:projects/network-zeromq.tgz} + version: 0.0.0 + + '@rush-temp/network@file:projects/network.tgz': + resolution: {integrity: sha512-NZhhGCoxzJydJ7hjYD8QJFl/iQPt6Vj6hrRsTsKBBO2iS25HRxjxZ1CTTTkbSU/3ZzKBZ8qrlJZuc4LIsyezCA==, tarball: file:projects/network.tgz} + version: 0.0.0 + '@rush-temp/notification-assets@file:projects/notification-assets.tgz': resolution: {integrity: sha512-vQTl0ZJNng9Y+dSKnBRwSbf1c6zVJp2xTUCyH0pB4DiJcco0GiHxu0vS1eVRM6X1sgCQ9w7bxX8S6Wyvp9IcOw==, tarball: file:projects/notification-assets.tgz} version: 0.0.0 @@ -8383,6 +8400,10 @@ packages: resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==} engines: {node: '>=0.8'} + cmake-ts@1.0.2: + resolution: {integrity: sha512-5l++JHE7MxFuyV/OwJf3ek7ZZN1aGPFPM5oUz6AnK5inQAPe4TFXRMz5sA2qg2FRgByPWdqO+gSfIPo8GzoKNQ==} + hasBin: true + co-body@6.1.0: resolution: {integrity: sha512-m7pOT6CdLN7FuXUcpuz/8lfQ/L77x8SchHCF4G0RBTJO20Wzmhn5Sp4/5WsKy8OSpifBSUrmg83qEqaDHdyFuQ==} @@ -9883,6 +9904,10 @@ packages: resolution: {integrity: sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==} engines: {node: '>= 0.4.0'} + fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + fill-range@7.1.1: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} 
engines: {node: '>=8'} @@ -11901,6 +11926,10 @@ packages: node-addon-api@1.7.2: resolution: {integrity: sha512-ibPK3iA+vaY1eEjESkQkM0BbCqFOaZMiXRTtdB0u7b4djtY6JnsjvPdUHVMg6xQt3B8fpTTWHI9A+ADjM9frzg==} + node-addon-api@8.5.0: + resolution: {integrity: sha512-/bRZty2mXUIFY/xU5HLvveNHlswNJej+RnxBjOMkidWfwZzgTbPG1E3K5TOxRLOR+5hX7bSofy8yf1hZevMS8A==} + engines: {node: ^18 || ^20 || >= 21} + node-api-version@0.2.0: resolution: {integrity: sha512-fthTTsi8CxaBXMaBAD7ST2uylwvsnYxh2PfaScwpMhos6KlSFajXQPcM4ogNE1q2s3Lbz9GCGqeIHC+C6OZnKg==} @@ -14668,6 +14697,10 @@ packages: zen-observable@0.8.15: resolution: {integrity: sha512-PQ2PC7R9rslx84ndNBZB/Dkv8V8fZEpk83RLgXtYd0fwUgEjseMn1Dgajh2x6S8QbZAFa9p2qVCEuYZNgve0dQ==} + zeromq@6.5.0: + resolution: {integrity: sha512-vWOrt19lvcXTxu5tiHXfEGQuldSlU+qZn2TT+4EbRQzaciWGwNZ99QQTolQOmcwVgZLodv+1QfC6UZs2PX/6pQ==} + engines: {node: '>= 12'} + zip-stream@6.0.1: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} @@ -14731,17 +14764,17 @@ snapshots: '@aws-sdk/types': 3.734.0 '@aws-sdk/util-locate-window': 3.568.0 '@smithy/util-utf8': 2.3.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-crypto/sha256-js@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 '@aws-sdk/types': 3.734.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-crypto/supports-web-crypto@5.2.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@aws-crypto/util@5.2.0': dependencies: @@ -14910,7 +14943,7 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/util-middleware': 4.0.1 fast-xml-parser: 4.4.1 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/credential-provider-env@3.734.0': dependencies: @@ -14918,7 +14951,7 @@ snapshots: '@aws-sdk/types': 3.734.0 '@smithy/property-provider': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/credential-provider-http@3.734.0': dependencies: @@ -14931,7 +14964,7 @@ snapshots: '@smithy/smithy-client': 4.1.3 '@smithy/types': 4.1.0 '@smithy/util-stream': 4.0.2 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/credential-provider-ini@3.734.0': dependencies: @@ -14947,7 +14980,7 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/shared-ini-file-loader': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 transitivePeerDependencies: - aws-crt @@ -14964,7 +14997,7 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/shared-ini-file-loader': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 transitivePeerDependencies: - aws-crt @@ -14975,7 +15008,7 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/shared-ini-file-loader': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/credential-provider-sso@3.734.0': dependencies: @@ -14986,7 +15019,7 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/shared-ini-file-loader': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 transitivePeerDependencies: - aws-crt @@ -14997,7 +15030,7 @@ snapshots: '@aws-sdk/types': 3.734.0 '@smithy/property-provider': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 transitivePeerDependencies: - aws-crt @@ -15050,7 +15083,7 @@ snapshots: '@aws-sdk/types': 3.734.0 '@smithy/protocol-http': 5.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/middleware-location-constraint@3.734.0': dependencies: @@ -15062,14 +15095,14 @@ snapshots: dependencies: '@aws-sdk/types': 3.734.0 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/middleware-recursion-detection@3.734.0': dependencies: '@aws-sdk/types': 3.734.0 '@smithy/protocol-http': 5.0.1 
'@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/middleware-sdk-s3@3.734.0': dependencies: @@ -15086,7 +15119,7 @@ snapshots: '@smithy/util-middleware': 4.0.1 '@smithy/util-stream': 4.0.2 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/middleware-ssec@3.734.0': dependencies: @@ -15102,7 +15135,7 @@ snapshots: '@smithy/core': 3.1.2 '@smithy/protocol-http': 5.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/nested-clients@3.734.0': dependencies: @@ -15154,7 +15187,7 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/util-config-provider': 4.0.0 '@smithy/util-middleware': 4.0.1 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/s3-request-presigner@3.738.0': dependencies: @@ -15174,7 +15207,7 @@ snapshots: '@smithy/protocol-http': 5.0.1 '@smithy/signature-v4': 5.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/token-providers@3.734.0': dependencies: @@ -15190,7 +15223,7 @@ snapshots: '@aws-sdk/types@3.734.0': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/util-arn-parser@3.723.0': dependencies: @@ -15201,7 +15234,7 @@ snapshots: '@aws-sdk/types': 3.734.0 '@smithy/types': 4.1.0 '@smithy/util-endpoints': 3.0.1 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/util-format-url@3.734.0': dependencies: @@ -15212,14 +15245,14 @@ snapshots: '@aws-sdk/util-locate-window@3.568.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/util-user-agent-browser@3.734.0': dependencies: '@aws-sdk/types': 3.734.0 '@smithy/types': 4.1.0 bowser: 2.11.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/util-user-agent-node@3.734.0': dependencies: @@ -15227,7 +15260,7 @@ snapshots: '@aws-sdk/types': 3.734.0 '@smithy/node-config-provider': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@aws-sdk/xml-builder@3.734.0': dependencies: @@ -15548,7 +15581,7 @@ snapshots: node-gyp: 9.4.1 ora: 5.4.1 read-binary-file-arch: 1.0.6 - semver: 7.7.2 + semver: 7.6.3 tar: 6.2.0 yargs: 17.7.2 transitivePeerDependencies: @@ -15989,7 +16022,7 @@ snapshots: jest-util: 29.7.0 jest-validate: 29.7.0 jest-watcher: 29.7.0 - micromatch: 4.0.8 + micromatch: 4.0.5 pretty-format: 29.7.0 slash: 3.0.0 strip-ansi: 6.0.1 @@ -16100,7 +16133,7 @@ snapshots: jest-haste-map: 29.7.0 jest-regex-util: 29.6.3 jest-util: 29.7.0 - micromatch: 4.0.8 + micromatch: 4.0.5 pirates: 4.0.6 slash: 3.0.0 write-file-atomic: 4.0.2 @@ -16550,7 +16583,7 @@ snapshots: '@npmcli/fs@2.1.2': dependencies: '@gar/promisify': 1.1.3 - semver: 7.7.2 + semver: 7.6.3 '@npmcli/move-file@2.0.1': dependencies: @@ -17451,7 +17484,7 @@ snapshots: extract-zip: 2.0.1 progress: 2.0.3 proxy-agent: 6.4.0 - semver: 7.7.2 + semver: 7.6.3 tar-fs: 3.0.6 unbzip2-stream: 1.4.3 yargs: 17.7.2 @@ -24473,6 +24506,62 @@ snapshots: - supports-color - ts-node + '@rush-temp/network-zeromq@file:projects/network-zeromq.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3))': + dependencies: + '@types/jest': 29.5.12 + '@types/node': 22.15.29 + '@types/uuid': 8.3.4 + '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.56.0)(typescript@5.8.3))(eslint@8.56.0)(typescript@5.8.3) + '@typescript-eslint/parser': 6.21.0(eslint@8.56.0)(typescript@5.8.3) + eslint: 8.56.0 + eslint-config-standard-with-typescript: 
40.0.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.56.0)(typescript@5.8.3))(eslint@8.56.0)(typescript@5.8.3))(eslint-plugin-import@2.29.1(eslint@8.56.0))(eslint-plugin-n@15.7.0(eslint@8.56.0))(eslint-plugin-promise@6.1.1(eslint@8.56.0))(eslint@8.56.0)(typescript@5.8.3) + eslint-plugin-import: 2.29.1(eslint@8.56.0) + eslint-plugin-n: 15.7.0(eslint@8.56.0) + eslint-plugin-promise: 6.1.1(eslint@8.56.0) + jest: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) + prettier: 3.2.5 + ts-jest: 29.1.2(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)))(typescript@5.8.3) + typescript: 5.8.3 + uuid: 8.3.2 + zeromq: 6.5.0 + transitivePeerDependencies: + - '@babel/core' + - '@jest/types' + - babel-jest + - babel-plugin-macros + - esbuild + - node-notifier + - supports-color + - ts-node + + '@rush-temp/network@file:projects/network.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3))': + dependencies: + '@types/jest': 29.5.12 + '@types/node': 22.15.29 + '@types/uuid': 8.3.4 + '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.56.0)(typescript@5.8.3))(eslint@8.56.0)(typescript@5.8.3) + '@typescript-eslint/parser': 6.21.0(eslint@8.56.0)(typescript@5.8.3) + eslint: 8.56.0 + eslint-config-standard-with-typescript: 40.0.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.56.0)(typescript@5.8.3))(eslint@8.56.0)(typescript@5.8.3))(eslint-plugin-import@2.29.1(eslint@8.56.0))(eslint-plugin-n@15.7.0(eslint@8.56.0))(eslint-plugin-promise@6.1.1(eslint@8.56.0))(eslint@8.56.0)(typescript@5.8.3) + eslint-plugin-import: 2.29.1(eslint@8.56.0) + eslint-plugin-n: 15.7.0(eslint@8.56.0) + eslint-plugin-promise: 6.1.1(eslint@8.56.0) + jest: 29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)) + prettier: 3.2.5 + simplytyped: 3.3.0(typescript@5.8.3) + ts-jest: 29.1.2(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(jest@29.7.0(@types/node@22.15.29)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3)))(typescript@5.8.3) + typescript: 5.8.3 + uuid: 8.3.2 + transitivePeerDependencies: + - '@babel/core' + - '@jest/types' + - babel-jest + - babel-plugin-macros + - esbuild + - node-notifier + - supports-color + - ts-node + '@rush-temp/notification-assets@file:projects/notification-assets.tgz(@babel/core@7.23.9)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.23.9))(esbuild@0.24.2)(ts-node@10.9.2(@swc/core@1.13.5)(@types/node@22.15.29)(typescript@5.8.3))': dependencies: '@types/jest': 29.5.12 @@ -31137,7 +31226,7 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/util-config-provider': 4.0.0 '@smithy/util-middleware': 4.0.1 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/core@3.1.2': dependencies: @@ -31148,7 +31237,7 @@ snapshots: '@smithy/util-middleware': 4.0.1 '@smithy/util-stream': 4.0.2 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/credential-provider-imds@4.0.1': dependencies: @@ -31156,7 +31245,7 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/types': 4.1.0 '@smithy/url-parser': 4.0.1 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/eventstream-codec@4.0.1': dependencies: @@ 
-31194,7 +31283,7 @@ snapshots: '@smithy/querystring-builder': 4.0.1 '@smithy/types': 4.1.0 '@smithy/util-base64': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/hash-blob-browser@4.0.1': dependencies: @@ -31208,7 +31297,7 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/util-buffer-from': 4.0.0 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/hash-stream-node@4.0.1': dependencies: @@ -31219,7 +31308,7 @@ snapshots: '@smithy/invalid-dependency@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/is-array-buffer@2.2.0': dependencies: @@ -31227,7 +31316,7 @@ snapshots: '@smithy/is-array-buffer@4.0.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/md5-js@4.0.1': dependencies: @@ -31239,7 +31328,7 @@ snapshots: dependencies: '@smithy/protocol-http': 5.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/middleware-endpoint@4.0.3': dependencies: @@ -31250,7 +31339,7 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/url-parser': 4.0.1 '@smithy/util-middleware': 4.0.1 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/middleware-retry@4.0.4': dependencies: @@ -31261,25 +31350,25 @@ snapshots: '@smithy/types': 4.1.0 '@smithy/util-middleware': 4.0.1 '@smithy/util-retry': 4.0.1 - tslib: 2.8.1 + tslib: 2.7.0 uuid: 9.0.1 '@smithy/middleware-serde@4.0.2': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/middleware-stack@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/node-config-provider@4.0.1': dependencies: '@smithy/property-provider': 4.0.1 '@smithy/shared-ini-file-loader': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/node-http-handler@4.0.2': dependencies: @@ -31292,12 +31381,12 @@ snapshots: '@smithy/property-provider@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/protocol-http@5.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/querystring-builder@4.0.1': dependencies: @@ -31308,7 +31397,7 @@ snapshots: '@smithy/querystring-parser@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/service-error-classification@4.0.1': dependencies: @@ -31317,7 +31406,7 @@ snapshots: '@smithy/shared-ini-file-loader@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/signature-v4@5.0.1': dependencies: @@ -31328,7 +31417,7 @@ snapshots: '@smithy/util-middleware': 4.0.1 '@smithy/util-uri-escape': 4.0.0 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/smithy-client@4.1.3': dependencies: @@ -31338,31 +31427,31 @@ snapshots: '@smithy/protocol-http': 5.0.1 '@smithy/types': 4.1.0 '@smithy/util-stream': 4.0.2 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/types@4.1.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/url-parser@4.0.1': dependencies: '@smithy/querystring-parser': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-base64@4.0.0': dependencies: '@smithy/util-buffer-from': 4.0.0 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-body-length-browser@4.0.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-body-length-node@4.0.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-buffer-from@2.2.0': dependencies: @@ -31372,11 +31461,11 @@ snapshots: '@smithy/util-buffer-from@4.0.0': dependencies: '@smithy/is-array-buffer': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-config-provider@4.0.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-defaults-mode-browser@4.0.4': 
dependencies: @@ -31384,7 +31473,7 @@ snapshots: '@smithy/smithy-client': 4.1.3 '@smithy/types': 4.1.0 bowser: 2.11.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-defaults-mode-node@4.0.4': dependencies: @@ -31394,28 +31483,28 @@ snapshots: '@smithy/property-provider': 4.0.1 '@smithy/smithy-client': 4.1.3 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-endpoints@3.0.1': dependencies: '@smithy/node-config-provider': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-hex-encoding@4.0.0': dependencies: - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-middleware@4.0.1': dependencies: '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-retry@4.0.1': dependencies: '@smithy/service-error-classification': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-stream@4.0.2': dependencies: @@ -31426,7 +31515,7 @@ snapshots: '@smithy/util-buffer-from': 4.0.0 '@smithy/util-hex-encoding': 4.0.0 '@smithy/util-utf8': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-uri-escape@4.0.0': dependencies: @@ -31435,18 +31524,18 @@ snapshots: '@smithy/util-utf8@2.3.0': dependencies: '@smithy/util-buffer-from': 2.2.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-utf8@4.0.0': dependencies: '@smithy/util-buffer-from': 4.0.0 - tslib: 2.8.1 + tslib: 2.7.0 '@smithy/util-waiter@4.0.2': dependencies: '@smithy/abort-controller': 4.0.1 '@smithy/types': 4.1.0 - tslib: 2.8.1 + tslib: 2.7.0 '@swc/core-darwin-arm64@1.13.5': optional: true @@ -32455,7 +32544,7 @@ snapshots: graphemer: 1.4.0 ignore: 5.3.1 natural-compare-lite: 1.4.0 - semver: 7.7.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@5.8.3) optionalDependencies: typescript: 5.8.3 @@ -32552,7 +32641,7 @@ snapshots: debug: 4.4.0 globby: 11.1.0 is-glob: 4.0.3 - semver: 7.7.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@5.8.3) optionalDependencies: typescript: 5.8.3 @@ -32584,7 +32673,7 @@ snapshots: '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.8.3) eslint: 8.56.0 eslint-scope: 5.1.1 - semver: 7.7.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color - typescript @@ -32932,7 +33021,7 @@ snapshots: minimatch: 10.0.1 resedit: 1.7.1 sanitize-filename: 1.6.3 - semver: 7.7.2 + semver: 7.6.3 tar: 6.2.0 temp-file: 3.4.0 transitivePeerDependencies: @@ -33095,7 +33184,7 @@ snapshots: async-mutex@0.3.2: dependencies: - tslib: 2.8.1 + tslib: 2.7.0 async@3.2.5: {} @@ -33288,7 +33377,7 @@ snapshots: braces@3.0.2: dependencies: - fill-range: 7.1.1 + fill-range: 7.0.1 braces@3.0.3: dependencies: @@ -33394,7 +33483,7 @@ snapshots: builtins@5.0.1: dependencies: - semver: 7.7.2 + semver: 7.6.0 busboy@1.6.0: dependencies: @@ -33455,7 +33544,7 @@ snapshots: camel-case@4.1.2: dependencies: pascal-case: 3.1.2 - tslib: 2.8.1 + tslib: 2.6.2 camelcase-css@2.0.1: {} @@ -33620,6 +33709,8 @@ snapshots: clone@1.0.4: {} + cmake-ts@1.0.2: {} + co-body@6.1.0: dependencies: inflation: 2.1.0 @@ -33747,7 +33838,7 @@ snapshots: json-schema-typed: 7.0.3 onetime: 5.1.2 pkg-up: 3.1.0 - semver: 7.7.2 + semver: 7.6.3 confbox@0.1.8: {} @@ -35441,6 +35532,10 @@ snapshots: filesize@8.0.7: {} + fill-range@7.0.1: + dependencies: + to-regex-range: 5.0.1 + fill-range@7.1.1: dependencies: to-regex-range: 5.0.1 @@ -35762,7 +35857,7 @@ snapshots: es6-error: 4.1.1 matcher: 3.0.0 roarr: 2.15.4 - semver: 7.7.2 + semver: 7.6.3 serialize-error: 7.0.1 optional: true @@ -36100,7 +36195,7 @@ snapshots: http-proxy: 1.18.1 is-glob: 4.0.3 is-plain-obj: 3.0.0 - micromatch: 4.0.8 + micromatch: 4.0.5 optionalDependencies: '@types/express': 4.17.21 
transitivePeerDependencies: @@ -36461,7 +36556,7 @@ snapshots: '@babel/parser': 7.23.9 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 - semver: 7.7.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color @@ -36577,7 +36672,7 @@ snapshots: jest-runner: 29.7.0 jest-util: 29.7.0 jest-validate: 29.7.0 - micromatch: 4.0.8 + micromatch: 4.0.5 parse-json: 5.2.0 pretty-format: 29.7.0 slash: 3.0.0 @@ -36652,7 +36747,7 @@ snapshots: jest-regex-util: 29.6.3 jest-util: 29.7.0 jest-worker: 29.7.0 - micromatch: 4.0.8 + micromatch: 4.0.5 walker: 1.0.8 optionalDependencies: fsevents: 2.3.3 @@ -36676,7 +36771,7 @@ snapshots: '@types/stack-utils': 2.0.3 chalk: 4.1.2 graceful-fs: 4.2.11 - micromatch: 4.0.8 + micromatch: 4.0.5 pretty-format: 29.7.0 slash: 3.0.0 stack-utils: 2.0.6 @@ -36786,7 +36881,7 @@ snapshots: jest-util: 29.7.0 natural-compare: 1.4.0 pretty-format: 29.7.0 - semver: 7.7.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color @@ -36966,7 +37061,7 @@ snapshots: lodash.isstring: 4.0.1 lodash.once: 4.1.1 ms: 2.1.3 - semver: 7.7.2 + semver: 7.6.3 jsx-ast-utils@3.3.5: dependencies: @@ -37451,7 +37546,7 @@ snapshots: make-dir@4.0.0: dependencies: - semver: 7.7.2 + semver: 7.6.3 make-error@1.3.6: {} @@ -37590,7 +37685,7 @@ snapshots: micromatch@4.0.5: dependencies: - braces: 3.0.3 + braces: 3.0.2 picomatch: 2.3.1 micromatch@4.0.8: @@ -37838,16 +37933,18 @@ snapshots: node-abi@3.55.0: dependencies: - semver: 7.7.2 + semver: 7.6.3 node-abort-controller@3.1.1: {} node-addon-api@1.7.2: optional: true + node-addon-api@8.5.0: {} + node-api-version@0.2.0: dependencies: - semver: 7.7.2 + semver: 7.6.3 node-cron@3.0.3: dependencies: @@ -37882,7 +37979,7 @@ snapshots: nopt: 6.0.0 npmlog: 6.0.2 rimraf: 3.0.2 - semver: 7.7.2 + semver: 7.6.3 tar: 6.2.0 which: 2.0.2 transitivePeerDependencies: @@ -38201,7 +38298,7 @@ snapshots: param-case@3.0.4: dependencies: dot-case: 3.0.4 - tslib: 2.8.1 + tslib: 2.6.2 parent-module@1.0.1: dependencies: @@ -39345,7 +39442,7 @@ snapshots: simple-update-notifier@2.0.0: dependencies: - semver: 7.7.2 + semver: 7.6.3 simplytyped@3.3.0(typescript@5.8.3): dependencies: @@ -39736,7 +39833,7 @@ snapshots: methods: 1.1.2 mime: 2.6.0 qs: 6.11.2 - semver: 7.7.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color @@ -40098,7 +40195,7 @@ snapshots: json5: 2.2.3 lodash.memoize: 4.1.2 make-error: 1.3.6 - semver: 7.7.2 + semver: 7.6.0 typescript: 5.8.3 yargs-parser: 21.1.1 optionalDependencies: @@ -40982,6 +41079,11 @@ snapshots: zen-observable@0.8.15: {} + zeromq@6.5.0: + dependencies: + cmake-ts: 1.0.2 + node-addon-api: 8.5.0 + zip-stream@6.0.1: dependencies: archiver-utils: 5.0.2 diff --git a/communication b/communication index a8d58f18bb6..9673971e925 160000 --- a/communication +++ b/communication @@ -1 +1 @@ -Subproject commit a8d58f18bb68e4cd4e3a97526af11eb800fb9fc3 +Subproject commit 9673971e9257bd167074b78921de746184604289 diff --git a/network/README.md b/network/README.md new file mode 100644 index 00000000000..7044f036ca0 --- /dev/null +++ b/network/README.md @@ -0,0 +1,145 @@ +# Huly Virtual Network + +A distributed, scalable virtual network architecture for the Huly that enables fault-tolerant performance communication. + +## 🚀 Overview + +The Huly Virtual Network is a sophisticated distributed system designed to handle enterprise-scale workloads with the following key capabilities: + +- **Distributed Load Balancing**: Intelligent routing to key components across multiple phytsical nodes. 
+- **Multi-Tenant Architecture**: Secure isolation of containers for user sessions/search engines/transaction processors.
+- **Fault Tolerance**: Automatic failover and recovery mechanisms.
+- **Scaling**: Horizontal and vertical service scaling.
+- **Real-time Communication**: Event-driven architecture with broadcast capabilities between containers.
+
+## Network Architecture Components
+
+### Agents & Containers
+
+An agent is the host on top of which containers run. Containers are the workhorses of any application built on top of Huly Network: they can be started, located and used.
+Every container can be located by its {kind + uuid} or through a labeling system. Once found, anyone can send a message to the container and ask it to perform some activity.
+
+Containers can be communicated with in two ways:
+
+1. Anyone can send a message to a container via the network.
+2. A request/response connection can be established to a container via the network.
+
+The network hands out references to containers: a referenced container stays active for as long as the reference exists. A container with no references is kept for a timeout period and then closed.
+
+Agents report to Huly Network the list of container kinds they are able to start, manage those containers, and provide liveness monitoring and networking capabilities.
+Communication with containers is managed by Huly Network.
+
+```mermaid
+flowchart BT
+    subgraph Agent["Network Agent"]
+        subgraph _TX["Transactor"]
+            TX1["ws1"]
+            TX2["ws2"]
+        end
+        subgraph _VMM["Virtual Machine"]
+            AG1["Hulia Agent"]
+            AG2["James Agent"]
+        end
+        subgraph _Sessions["Session"]
+            S1["user1"]
+            S2["user2"]
+        end
+        subgraph _Query["Query Engine"]
+            Q1["Europe"]
+            Q3["ws3"]
+        end
+    end
+    subgraph Agent2["Network Agent2"]
+        subgraph _TX_2["Transactor"]
+            TX3["ws3"]
+            TX4["ws4"]
+        end
+        subgraph _Query_2["Query Engine"]
+            Q1_2["Europe"]
+            Q2_2["America"]
+        end
+    end
+
+    subgraph NET["Huly Network"]
+        Containers["Containers"]
+        Agents["Agents"]
+    end
+
+
+    S1 -.-> TX1 & Q1 & Q2_2 & TX4
+    Agent --> NET
+    Agent2 --> NET
+    AG1@{ shape: stored-data}
+    AG2@{ shape: stored-data}
+    S1@{ shape: h-cyl}
+    S2@{ shape: h-cyl}
+    style Agent stroke:#00C853
+```
+
+## Building Huly on top of Huly Network
+
+Huly itself can be built from the following set of container kinds: `session`, `query`, `transactor`.
+
+- session -> a map/reduce/find executor for queries and transactions coming from the client.
+- query -> a DB query engine; it executes `find` requests from sessions, passes them to the DB and allows searching across all data in a region. It should have access to the account -> workspace mapping tables for security.
+- transactor -> the modification orchestrator for all edit operations; it applies them one by one.
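+
+The following sketch is adapted from the package's own unit tests (`network/core/src/__test__/network.spec.ts`): an agent registers a factory for the `session` kind, and a client then obtains a container of that kind through the network and sends it a request. The `EchoSession` container, the endpoint string and the ids are illustrative placeholders, and the imports assume the root exports of `@hcengineering/network`.
+
+```typescript
+import {
+  AgentImpl,
+  NetworkImpl,
+  TickManagerImpl,
+  composeCID,
+  type AgentUuid,
+  type ClientUuid,
+  type Container,
+  type ContainerEndpointRef,
+  type ContainerKind
+} from '@hcengineering/network'
+
+// A trivial container standing in for a real `session` implementation.
+class EchoSession implements Container {
+  async request (operation: string, data?: any): Promise<any> {
+    return { operation, data } // echo the request back
+  }
+
+  async terminate (): Promise<void> {}
+  async ping (): Promise<void> {}
+  connect (clientId: ClientUuid, broadcast: (data: any) => Promise<void>): void {}
+  disconnect (clientId: ClientUuid): void {}
+}
+
+async function main (): Promise<void> {
+  const tickManager = new TickManagerImpl(1) // 1 tick per second
+  const network = new NetworkImpl(tickManager)
+
+  // The agent advertises the container kinds it is able to start.
+  const agent = new AgentImpl('agent-1' as AgentUuid, {
+    ['session' as ContainerKind]: async (): Promise<[Container, ContainerEndpointRef]> => [
+      new EchoSession(),
+      'echo-endpoint' as ContainerEndpointRef
+    ]
+  })
+  await agent.register(network)
+
+  // A client asks for a session container; it is started on demand and a reference is kept.
+  const uuid = composeCID('session', 'user1')
+  const endpoint = await network.get('client-1' as ClientUuid, uuid, { kind: 'session' as ContainerKind })
+  console.log('session endpoint:', endpoint)
+
+  // Requests are proxied through the network to the container.
+  console.log(await network.request(uuid, 'echo', { hello: 'world' }))
+
+  // Releasing the reference lets the network shut the container down later.
+  await network.release('client-1' as ClientUuid, uuid)
+}
+
+void main()
+```
+
+Once the last client reference is released, the container is moved to the orphan list and is terminated by the next periodic health check of the network.
+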
+ +```mermaid +flowchart + Endpoint -.->| + connect + session/user1 + |HulyNetwork[Huly Network] + + Endpoint <-->|find,tx| parsonal-ws:user1 + + parsonal-ws:user1 -..->|get-workspace info| DatalakeDB + + parsonal-ws:user1 -..->|find| query:europe + + query:europe -..->|resp| parsonal-ws:user1 + + parsonal-ws:user1 -..->|response chunks| Endpoint + + parsonal-ws:user1 -..->|tx| transactor:ws1 + + transactor:ws1 -..->|event's| HulyPulse + + HulyPulse <--> Client + + Client <--> Endpoint + + query:europe -..->|"update"| QueryDB + transactor:ws1 -..->|update| DatalakeDB + + transactor:ws1 -..->|txes| Kafka[Output Queue] + + Kafka -..-> Indexer[Structure + + Fulltext Index] + + Indexer -..-> QueryDB + + Indexer -..->|indexed tx| HulyPulse + + Kafka -..-> AsyncTriggers + + AsyncTriggers -..->|find| query:europe + + AsyncTriggers -..->|derived txes| transactor:ws1 + + InputQueue -->|txes| transactor:ws1 + + Services[Services + Github/Telegram/Translate] -..-> InputQueue + + Kafka -..-> Services + + Services -..-> query:europe + + QueryDB@{shape: database} + InputQueue@{shape: database} + DatalakeDB@{shape: database} + Kafka@{shape: database} + parsonal-ws:user1@{ shape: h-cyl} + +``` diff --git a/network/core/.eslintrc.js b/network/core/.eslintrc.js new file mode 100644 index 00000000000..ce90fb9646f --- /dev/null +++ b/network/core/.eslintrc.js @@ -0,0 +1,7 @@ +module.exports = { + extends: ['./node_modules/@hcengineering/platform-rig/profiles/node/eslint.config.json'], + parserOptions: { + tsconfigRootDir: __dirname, + project: './tsconfig.json' + } +} diff --git a/network/core/config/rig.json b/network/core/config/rig.json new file mode 100644 index 00000000000..78cc5a17334 --- /dev/null +++ b/network/core/config/rig.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/rig-package/rig.schema.json", + "rigPackageName": "@hcengineering/platform-rig", + "rigProfile": "node" +} diff --git a/network/core/docs/alive-checkins.md b/network/core/docs/alive-checkins.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/core/jest.config.js b/network/core/jest.config.js new file mode 100644 index 00000000000..2cfd408b679 --- /dev/null +++ b/network/core/jest.config.js @@ -0,0 +1,7 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + testMatch: ['**/?(*.)+(spec|test).[jt]s?(x)'], + roots: ["./src"], + coverageReporters: ["text-summary", "html"] +} diff --git a/network/core/package.json b/network/core/package.json new file mode 100644 index 00000000000..9190e5f2e50 --- /dev/null +++ b/network/core/package.json @@ -0,0 +1,53 @@ +{ + "name": "@hcengineering/network", + "version": "0.6.32", + "main": "lib/index.js", + "svelte": "src/index.ts", + "types": "types/index.d.ts", + "author": "Huly Platform Contributors", + "template": "@hcengineering/node-package", + "license": "EPL-2.0", + "scripts": { + "build": "compile", + "build:watch": "compile", + "test": "jest --passWithNoTests --silent --forceExit", + "format": "format src", + "_phase:build": "compile transpile src", + "_phase:test": "jest --passWithNoTests --silent --forceExit", + "_phase:format": "format src", + "_phase:validate": "compile validate" + }, + "devDependencies": { + "@hcengineering/platform-rig": "^0.6.0", + "@typescript-eslint/eslint-plugin": "^6.11.0", + "eslint-plugin-import": "^2.26.0", + "eslint-plugin-promise": "^6.1.1", + "eslint-plugin-n": "^15.4.0", + "eslint": "^8.54.0", + "simplytyped": "^3.3.0", + "@typescript-eslint/parser": "^6.11.0", + 
"eslint-config-standard-with-typescript": "^40.0.0", + "prettier": "^3.1.0", + "typescript": "^5.8.3", + "jest": "^29.7.0", + "ts-jest": "^29.1.1", + "@types/jest": "^29.5.5", + "@types/node": "^22.15.29", + "@types/uuid": "^8.3.1" + }, + "dependencies": { + "@hcengineering/analytics": "^0.6.0", + "uuid": "^8.3.2" + }, + "repository": "https://github.com/hcengineering/platform", + "publishConfig": { + "registry": "https://npm.pkg.github.com" + }, + "exports": { + ".": { + "types": "./types/index.d.ts", + "require": "./lib/index.js", + "import": "./lib/index.js" + } + } +} diff --git a/network/core/src/__test__/alive-checkins.spec.ts b/network/core/src/__test__/alive-checkins.spec.ts new file mode 100644 index 00000000000..8e9321bcd98 --- /dev/null +++ b/network/core/src/__test__/alive-checkins.spec.ts @@ -0,0 +1,44 @@ +import type { AgentUuid } from '../api/net' +import { timeouts } from '../api/timeouts' +import { AgentImpl, NetworkImpl } from '../net' +import { TickManagerImpl } from '../utils' + +class FakeTickManager extends TickManagerImpl { + time: number = 0 + + now (): number { + return this.time + } +} + +const agents = { + agent1: 'agent1' as AgentUuid, + agent2: 'agent2' as AgentUuid, + agent3: 'agent3' as AgentUuid +} + +describe('alive checkins tests', () => { + it('should mark and remove agent if not alive', async () => { + const tickManager = new FakeTickManager(1) // 1 tick per second + const network = new NetworkImpl(tickManager) + const agent1 = new AgentImpl(agents.agent1, {}) + + // Register agent + await agent1.register(network) + + // Get initial lastSeen time + const agentRecord = (await network.agents()).find(a => a.agentId === agents.agent1) + expect(agentRecord).toBeDefined() + + // Make looks like agent is not alive anymore + tickManager.time += 1000 + timeouts.aliveTimeout * 1000 + + // Trigger alive callback + await (network as any).checkAlive() + + // Check that lastSeen was updated (we can't easily test the exact timestamp, + // but we can verify the agent is still in the network) + const updatedAgentRecord = (await network.agents()).find(a => a.agentId === agents.agent1) + expect(updatedAgentRecord).toBeUndefined() + }) +}) diff --git a/network/core/src/__test__/network.spec.ts b/network/core/src/__test__/network.spec.ts new file mode 100644 index 00000000000..012e123b85c --- /dev/null +++ b/network/core/src/__test__/network.spec.ts @@ -0,0 +1,72 @@ +import type { AgentUuid, ClientUuid, ContainerEndpointRef, ContainerKind } from '../api/net' +import { AgentImpl, NetworkImpl, type Container } from '../net' +import { TickManagerImpl } from '../utils' + +// class DummyConnectionManager implements ConnectionManager { +// async connect (endpoint: ContainerEndpointRef): Promise { +// throw new Error('Method not implemented.') +// } +// } + +class DummyContainer implements Container { + lastVisit: number = 0 + onTerminated?: (() => void) | undefined + + async request (operation: string, data: any): Promise { + return undefined + } + + async terminate (): Promise {} + + async ping (): Promise { + this.lastVisit = Date.now() + } + + connect (clientId: ClientUuid, handler: (data: any) => Promise): void { + + } + + disconnect (clientId: ClientUuid): void { + + } +} + +const agents = { + agent1: 'agent1' as AgentUuid, + agent2: 'agent2' as AgentUuid, + agent3: 'agent3' as AgentUuid +} + +const kinds: Record = { + session: 'session' as ContainerKind +} + +describe('network tests', () => { + it('check register agent', async () => { + const tickManager = new 
TickManagerImpl(1) // 1 tick per second + const network = new NetworkImpl(tickManager) + + const agent1 = new AgentImpl(agents.agent1, {}) + await agent1.register(network) + + expect((network as any)._agents.size).toBe(1) + }) + + it('start container', async () => { + const tickManager = new TickManagerImpl(1) // 1 tick per second + const network = new NetworkImpl(tickManager) + + const agent1 = new AgentImpl( + agents.agent1, + { + [kinds.session]: () => + Promise.resolve([new DummyContainer(), '' as ContainerEndpointRef]) + } + ) + await agent1.register(network) + expect((network as any)._agents.size).toBe(1) + + const s1 = await network.get(agents.agent1 as any, 's1' as any, { kind: kinds.session }) + expect(s1).toBeDefined() + }) +}) diff --git a/network/core/src/__test__/samples.ts b/network/core/src/__test__/samples.ts new file mode 100644 index 00000000000..337d39ad8cb --- /dev/null +++ b/network/core/src/__test__/samples.ts @@ -0,0 +1,27 @@ +import type { AccountUuid, WorkspaceUuid } from '../api/types' +import { StaticWorkspaceDiscovery } from '../discovery/static' + +export const workspaces = { + ws1: 'ws1' as WorkspaceUuid, + ws2: 'ws2' as WorkspaceUuid, + ws3: 'ws3' as WorkspaceUuid, + ws4: 'ws4' as WorkspaceUuid, + ws5: 'ws5' as WorkspaceUuid, + ws6: 'ws6' as WorkspaceUuid, + ws7: 'ws7' as WorkspaceUuid, + ws8: 'ws8' as WorkspaceUuid, + ws9: 'ws9' as WorkspaceUuid, + ws10: 'ws10' as WorkspaceUuid +} + +export const users = { + user1: 'user1' as AccountUuid, + user2: 'user2' as AccountUuid +} + +export const wsDiscovery = new StaticWorkspaceDiscovery({ + [users.user1]: [workspaces.ws1, workspaces.ws2, workspaces.ws3], + [users.user2]: [workspaces.ws4, workspaces.ws5, workspaces.ws6], + [workspaces.ws1]: [workspaces.ws7, workspaces.ws8], + [workspaces.ws8]: [workspaces.ws9, workspaces.ws10] +}) diff --git a/network/core/src/__test__/tickMgr.spec.ts b/network/core/src/__test__/tickMgr.spec.ts new file mode 100644 index 00000000000..baf9c360a33 --- /dev/null +++ b/network/core/src/__test__/tickMgr.spec.ts @@ -0,0 +1,28 @@ +import { TickManagerImpl } from '../utils' + +describe('check tickManager', () => { + it('check ticks', async () => { + const mgr = new TickManagerImpl(20) + const h1 = mgr.nextHash() + + // await mgr.tick() + expect(mgr.isMe(h1, 1)).toBe(true) + expect(mgr.isMe(h1, 2)).toBe(true) + expect(mgr.isMe(h1, 3)).toBe(true) + + ;(mgr as any)._tick = 20 + expect(mgr.isMe(h1, 1)).toBe(true) + expect(mgr.isMe(h1, 2)).toBe(false) + expect(mgr.isMe(h1, 3)).toBe(false) + + ;(mgr as any)._tick = 40 + expect(mgr.isMe(h1, 1)).toBe(true) + expect(mgr.isMe(h1, 2)).toBe(true) + expect(mgr.isMe(h1, 3)).toBe(false) + + ;(mgr as any)._tick = 60 + expect(mgr.isMe(h1, 1)).toBe(true) + expect(mgr.isMe(h1, 2)).toBe(false) + expect(mgr.isMe(h1, 3)).toBe(true) + }) +}) \ No newline at end of file diff --git a/network/core/src/api/discovery.ts b/network/core/src/api/discovery.ts new file mode 100644 index 00000000000..b1e13de6d6c --- /dev/null +++ b/network/core/src/api/discovery.ts @@ -0,0 +1,11 @@ +import type { AccountUuid, WorkspaceUuid } from './types' + +export interface WorkspaceDiscovery { + byAccount: (account: AccountUuid) => Promise + + byWorkspace: (workspace: WorkspaceUuid) => Promise +} + +export interface AccountDiscovery { + byWorkspace: (workspace: WorkspaceUuid) => Promise +} diff --git a/network/core/src/api/net.ts b/network/core/src/api/net.ts new file mode 100644 index 00000000000..4963f956fff --- /dev/null +++ b/network/core/src/api/net.ts @@ -0,0 +1,192 @@ +import 
type { Container } from '../net' + +export type ContainerUuid = string & { _containerUuid: true } +export type ContainerKind = string & { _containerKind: true } +export type AgentUuid = string & { _networkAgentUuid: true } +export type ClientUuid = string & { _networkClientUuid: true } +export type ContainerEndpointRef = string & { _containerEndpointRef: true } +export type AgentEndpointRef = string & { _agentEndpointRef: true } + +export interface ContainerRecord { + agentId: AgentUuid + uuid: ContainerUuid + kind: ContainerKind + endpoint: ContainerEndpointRef + lastVisit: number // Last time when container was visited + + // Last request used + extra?: Record // Extra parameters for container start + + labels?: string[] +} + +export interface AgentRecord { + agentId: AgentUuid + + // If endpoint is not sepecified, container send will be passthrought via network connection. + // Individal containers still could have connections. + endpoint?: AgentEndpointRef + + // A change to containers + containers: ContainerRecord[] + kinds: ContainerKind[] +} + +export interface ContainerRequest { + kind: ContainerKind + extra?: Record // Extra parameters for container start + + labels?: string[] +} + +/** + * Interface to Huly network on server. + */ +export interface Network { + /* + * Register or reregister agent in network. + * On every network restart agent should reconnect to network. + */ + register: (record: AgentRecord, agent: NetworkAgent) => Promise + + // Mark an agent as alive (updates lastSeen timestamp) + ping: (agentId: AgentUuid | ClientUuid) => void + + agents: () => Promise + + // A full uniq set of supported container kinds. + kinds: () => Promise + + /* + * Get/Start of required container kind on agent + * Will start a required container on agent, if not already started. + */ + get: (client: ClientUuid, uuid: ContainerUuid, request: ContainerRequest) => Promise + + /** + * Release a container for a client, if container is not used anymore it will be shutdown with a shutdown delay. + */ + release: (client: ClientUuid, uuid: ContainerUuid) => Promise + + list: (kind: ContainerKind) => Promise + + // Send some data to container, using proxy connection. + request: (target: ContainerUuid, operation: string, data?: any) => Promise +} + +export interface NetworkWithClients { + addClient: (clientUuid: ClientUuid, onContainer?: (event: ContainerEvent) => Promise) => void + removeClient: (clientUuid: ClientUuid) => void +} + +export type ContainerUpdateListener = (event: ContainerEvent) => Promise + +/** + * Interface to Huly network. + * + * Identification is generated during instantions of client. + */ +export interface NetworkClient { + /* + * Register or a NetworkAgent API to be processed by network. + * On every network change restart agent register method will be called. + */ + register: (agent: NetworkAgent) => Promise + + agents: () => Promise + + // A full uniq set of supported container kinds. + kinds: () => Promise + + /* + * Get/Start of required container kind on agent + * Will start a required container on agent, if not already started. + */ + get: (uuid: ContainerUuid, request: ContainerRequest) => Promise + + list: (kind: ContainerKind) => Promise + + // Send some data to container, using proxy connection. 
+ request: (target: ContainerUuid, operation: string, data?: any) => Promise + + // Register on container update listener + onContainerUpdate: (listener: ContainerUpdateListener) => void + + close: () => Promise +} + +export interface ConnectionManager { + connect: (endpoint: ContainerEndpointRef) => Promise +} + +/** + * A client reference to container, until closed, client will notify network about container is still required. + */ +export interface ContainerReference { + uuid: ContainerUuid + + endpoint: ContainerEndpointRef + + close: () => Promise + + connect: (timeout?: number) => Promise + + request: (operation: string, data?: any) => Promise +} + +export interface ContainerEvent { + added: ContainerRecord[] + deleted: ContainerRecord[] + updated: ContainerRecord[] +} + +/** + * Interface to Huly Agent on agent. + */ +export interface NetworkAgent { + // Agent uniq identigier, should be same on agent restarts. + uuid: AgentUuid + + // Agent connection endpoint. + endpoint?: AgentEndpointRef + + // A supported set of container kinds supported to be managed by the agent + kinds: ContainerKind[] + + // event handled from agent to network events. + onUpdate?: (event: ContainerEvent) => Promise + + // Send agent update info to network, if applicable. + onAgentUpdate?: () => Promise + + // Get/Start of required container kind on agent + get: (uuid: ContainerUuid, request: ContainerRequest) => Promise + + // A low level reference to container + getContainer: (uuid: ContainerUuid) => Promise + + // List of active containers + list: (kind?: ContainerKind) => Promise + + // Send some data to container + request: (target: ContainerUuid, operation: string, data?: any) => Promise + + // ask for immediate termination for container + terminate: (container: ContainerEndpointRef) => Promise +} + +// A request/reponse interface to container. +export interface ContainerConnection { + containerId: ContainerUuid + + // A simple request/response to container. + request: (operation: string, data?: any) => Promise + + // A chunk streaming of results + // stream: (data: any) => Iterable + + // Recieve not a requests but also any kind of notifications. + on?: (data: any) => Promise + + close: () => Promise +} diff --git a/network/core/src/api/timeouts.ts b/network/core/src/api/timeouts.ts new file mode 100644 index 00000000000..108f38589cb --- /dev/null +++ b/network/core/src/api/timeouts.ts @@ -0,0 +1,4 @@ +export const timeouts = { + aliveTimeout: 3, // seconds - timeout for detecting dead agents + pingInterval: 1 // seconds - how often to ping agents +} diff --git a/network/core/src/api/types.ts b/network/core/src/api/types.ts new file mode 100644 index 00000000000..a43d45fce2e --- /dev/null +++ b/network/core/src/api/types.ts @@ -0,0 +1,9 @@ +/** + * A unique identifier for a workspace. + */ +export type WorkspaceUuid = string & { __workspaceUuid: true } + +/** + * A unique identifier for an account. 
+ */ +export type AccountUuid = string & { __accountUuid: true } diff --git a/network/core/src/api/utils.ts b/network/core/src/api/utils.ts new file mode 100644 index 00000000000..30dbf289b0e --- /dev/null +++ b/network/core/src/api/utils.ts @@ -0,0 +1,14 @@ +export type TickHandler = () => void | Promise + +export interface TickManager { + now: () => number + + // Interval in seconds + register: (handler: TickHandler, interval: number) => void + + // Start tick manager + start: () => void + + // Stop tick manager + stop: () => void +} diff --git a/network/core/src/discovery/static.ts b/network/core/src/discovery/static.ts new file mode 100644 index 00000000000..265dda52c5d --- /dev/null +++ b/network/core/src/discovery/static.ts @@ -0,0 +1,14 @@ +import type { WorkspaceDiscovery } from '../api/discovery' +import type { AccountUuid, WorkspaceUuid } from '../api/types' + +export class StaticWorkspaceDiscovery implements WorkspaceDiscovery { + constructor (private readonly workspaces: Record) {} + + async byAccount (account: AccountUuid): Promise { + return this.workspaces[account] ?? [] + } + + async byWorkspace (workspace: WorkspaceUuid): Promise { + return this.workspaces[workspace] ?? [] + } +} diff --git a/network/core/src/index.ts b/network/core/src/index.ts new file mode 100644 index 00000000000..ac33b5392e9 --- /dev/null +++ b/network/core/src/index.ts @@ -0,0 +1,8 @@ +export * from './api/discovery' +export * from './api/types' +export * from './api/utils' +export * from './api/net' +export * from './api/timeouts' +export * from './discovery/static' +export * from './utils' +export * from './net/index' diff --git a/network/core/src/net/agent.ts b/network/core/src/net/agent.ts new file mode 100644 index 00000000000..40e5a13bd97 --- /dev/null +++ b/network/core/src/net/agent.ts @@ -0,0 +1,114 @@ +import type { + AgentEndpointRef, + AgentUuid, + ContainerEndpointRef, + ContainerKind, + ContainerRecord, + ContainerRequest, + ContainerUuid, + Network, + NetworkAgent +} from '../api/net' +import type { Container, ContainerFactory } from './containers' + +interface ContainerRecordImpl { + container: Container + uuid: ContainerUuid + endpoint: ContainerEndpointRef + kind: ContainerKind + + lastVisit: number +} + +export class AgentImpl implements NetworkAgent { + // Own, managed containers + private readonly _byId = new Map>() + + private readonly _containers = new Map() + + endpoint?: AgentEndpointRef | undefined + + constructor ( + readonly uuid: AgentUuid, + private readonly factory: Record + ) { + } + + async register (network: Network): Promise { + const cleanContainers = await network.register({ + agentId: this.uuid, + containers: await this.list(), + kinds: this.kinds, + endpoint: this.endpoint + }, this) + for (const c of cleanContainers) { + await this.terminate(c) + } + } + + async list (kind?: ContainerKind): Promise { + return Array.from(this._containers.values()) + .filter((it) => !(it instanceof Promise) && (kind === undefined || it.kind === kind)) + .map((it) => ({ + agentId: this.uuid, + uuid: it.uuid, + endpoint: it.endpoint, + kind: it.kind, + lastVisit: it.lastVisit + })) + } + + get kinds (): ContainerKind[] { + return Object.keys(this.factory) as ContainerKind[] + } + + async getContainerImpl (uuid: ContainerUuid): Promise { + let current = this._byId.get(uuid) + if (current instanceof Promise) { + current = await current + this._byId.set(uuid, current) + } + return current + } + + async getContainer (uuid: ContainerUuid): Promise { + return (await 
this.getContainerImpl(uuid))?.container + } + + async get (uuid: ContainerUuid, request: ContainerRequest): Promise { + const current = await this.getContainerImpl(uuid) + if (current !== undefined) { + return current.endpoint + } + + let container: ContainerRecordImpl | Promise = this.factory[request.kind](uuid, request).then(r => ({ + container: r[0], + endpoint: r[1], + kind: request.kind, + lastVisit: Date.now(), + uuid + })) + this._byId.set(uuid, container) + container = await container + this._containers.set(container.endpoint, container) + this._byId.set(uuid, container) + + return container.endpoint + } + + async terminate (endpoint: ContainerEndpointRef): Promise { + const current = this._containers.get(endpoint) + if (current !== undefined) { + this._containers.delete(endpoint) + await current.container.terminate() + } + } + + async request (target: ContainerUuid, operation: string, data?: any): Promise { + const container = await this.getContainer(target) + if (container === undefined) { + throw new Error(`Container ${target} not found`) + } + return await container.request(operation, data) + } +} diff --git a/network/core/src/net/containers.ts b/network/core/src/net/containers.ts new file mode 100644 index 00000000000..993c6292747 --- /dev/null +++ b/network/core/src/net/containers.ts @@ -0,0 +1,24 @@ +import type { ClientUuid, ContainerEndpointRef, ContainerRecord, ContainerRequest, ContainerUuid } from '../api/net' + +export interface Container { + request: (operation: string, data?: any, clientId?: ClientUuid) => Promise + + // Called when the container is terminated + onTerminated?: () => void + + terminate: () => Promise + + ping: () => Promise + + connect: (clientId: ClientUuid, broadcast: (data: any) => Promise) => void + disconnect: (clientId: ClientUuid) => void +} + +export type ContainerFactory = (uuid: ContainerUuid, request: ContainerRequest) => Promise<[Container, ContainerEndpointRef]> + +export interface ContainerRecordImpl { + record: ContainerRecord + endpoint: ContainerEndpointRef | Promise + + clients: Set +} diff --git a/network/core/src/net/index.ts b/network/core/src/net/index.ts new file mode 100644 index 00000000000..fd00173a106 --- /dev/null +++ b/network/core/src/net/index.ts @@ -0,0 +1,9 @@ +import type { ContainerUuid } from '../api/net' + +export * from './containers' +export * from './network' +export * from './agent' + +export function composeCID (prefix: string, id: string): ContainerUuid { + return `${prefix}_${id}` as ContainerUuid +} diff --git a/network/core/src/net/network.ts b/network/core/src/net/network.ts new file mode 100644 index 00000000000..3d2f8741a3d --- /dev/null +++ b/network/core/src/net/network.ts @@ -0,0 +1,432 @@ +import type { + AgentEndpointRef, + AgentRecord, + AgentUuid, + ClientUuid, + ContainerEndpointRef, + ContainerEvent, + ContainerKind, + ContainerRecord, + ContainerRequest, + ContainerUuid, + Network, + NetworkAgent, + NetworkWithClients +} from '../api/net' +import { timeouts } from '../api/timeouts' +import type { TickManager } from '../api/utils' +import type { ContainerRecordImpl } from './containers' + +interface AgentRecordImpl { + api: NetworkAgent + containers: Map + endpoint?: AgentEndpointRef + kinds: ContainerKind[] + + lastSeen: number // Last time when agent was seen +} + +interface ClientRecordImpl { + lastSeen: number + containers: Set + onContainer?: (event: ContainerEvent) => Promise +} +/** + * Server network implementation. 
+ */ +export class NetworkImpl implements Network, NetworkWithClients { + private idx: number = 0 + + private readonly _agents = new Map() + + private readonly _containers = new Map() + + private readonly _clients = new Map() + + private readonly _orphanedContainers = new Map() + + private eventQueue: ContainerEvent[] = [] + + constructor (private readonly tickManager: TickManager) { + // Register for periodic agent health checks + tickManager.register(() => { + void this.checkAlive().catch((err) => { + console.error('Error during network health check:', err) + }) + // Check for events on every tick + void this.sendEvents() + }, timeouts.aliveTimeout) + } + + async agents (): Promise { + return Array.from( + this._agents.values().map(({ api, containers }) => ({ + agentId: api.uuid, + endpoint: api.endpoint, + kinds: api.kinds, + containers: Object.values(containers).map(({ record }) => record) + })) + ) + } + + async kinds (): Promise { + return Array.from( + this._agents + .values() + .map((it) => it.kinds) + .flatMap((it) => it) + ) + } + + async list (kind: ContainerKind): Promise { + return Array.from(this._agents.values()) + .flatMap((it) => Array.from(it.containers.values())) + .filter((it) => it.record.kind === kind) + .map((it) => it.record) + } + + async request (target: ContainerUuid, operation: string, data?: any): Promise { + const agentId = this._containers.get(target) + if (agentId === undefined) { + throw new Error(`Container ${target} not found`) + } + const agent = this._agents.get(agentId) + if (agent === undefined) { + throw new Error(`Agent ${agentId} not found for container ${target}`) + } + const container = agent.containers.get(target) + if (container === undefined) { + throw new Error(`Container ${target} not registered on agent ${agentId}`) + } + return await agent.api.request(target, operation, data) + } + + async register (record: AgentRecord, agent: NetworkAgent): Promise { + const containers: ContainerRecord[] = record.containers + const newContainers = new Map( + containers.map((record) => [ + record.uuid, + { + record, + request: { kind: record.kind }, + endpoint: record.endpoint, + clients: new Set([]) + } + ]) + ) + + const containerEvent: ContainerEvent = { + added: [], + deleted: [], + updated: [] + } + + // Register agent record + const oldAgent = this._agents.get(record.agentId) + if (oldAgent !== undefined) { + // In case re-register or reconnect is happened. + // Check if some of container changed endpoints. + for (const rec of containers) { + const oldRec = oldAgent.containers.get(rec.uuid) + if (oldRec !== undefined) { + if (oldRec.record.endpoint !== rec.endpoint) { + oldRec.endpoint = rec.endpoint // Update endpoint + containerEvent.updated.push(rec) + } + } + } + // Handle remove of containers + for (const oldC of oldAgent.containers.values()) { + if (newContainers.get(oldC.record.uuid) === undefined) { + containerEvent.deleted.push(oldC.record) + this._containers.delete(oldC.record.uuid) // Remove from active container registry + } + } + } + + const containersToShutdown: ContainerEndpointRef[] = [] + + // Update active container registry. 
+ for (const rec of containers) { + const oldAgentId = this._containers.get(rec.uuid) + if (oldAgentId === undefined) { + containerEvent.added.push(rec) + this._containers.set(rec.uuid, record.agentId) + } + if (oldAgentId !== record.agentId) { + containersToShutdown.push(rec.endpoint) + } + } + + // update agent record + + this._agents.set(record.agentId, { + api: agent, + containers: newContainers, + endpoint: record.endpoint, + kinds: record.kinds, + lastSeen: this.tickManager.now() + }) + + this.eventQueue.push(containerEvent) + + // Send notification to all agents about containers update. + return containersToShutdown + } + + async sendEvents (): Promise { + const events = [...this.eventQueue] + this.eventQueue = [] + if (events.length === 0) { + return + } + // Combine events + + const finalEvent: ContainerEvent = { + added: [], + deleted: [], + updated: [] + } + for (const event of events) { + finalEvent.added.push(...event.added) + finalEvent.deleted.push(...event.deleted) + finalEvent.updated.push(...event.updated) + } + + // Skip deleted events. + const deletedIds = finalEvent.deleted.map((c) => c.uuid) + finalEvent.added = finalEvent.added.filter((c) => !deletedIds.includes(c.uuid)) + finalEvent.updated = finalEvent.updated.filter((c) => !deletedIds.includes(c.uuid)) + + for (const [clientUuid, client] of Object.entries(this._clients)) { + if (client.onContainer !== undefined) { + try { + // We should not block on broadcast to clients. + void client.onContainer(finalEvent) + } catch (err: any) { + console.error(`Error in client ${clientUuid} onContainer callback:`, err) + } + } + } + } + + addClient (clientUuid: ClientUuid, onContainer?: (event: ContainerEvent) => Promise): void { + this._clients.set(clientUuid, { lastSeen: this.tickManager.now(), containers: new Set(), onContainer }) + } + + removeClient (clientUuid: ClientUuid): void { + this._clients.delete(clientUuid) + } + + async get (clientUuid: ClientUuid, uuid: ContainerUuid, request: ContainerRequest): Promise { + this.ping(clientUuid) + + let client = this._clients.get(clientUuid) + if (client === undefined) { + client = { lastSeen: this.tickManager.now(), containers: new Set() } + this._clients.set(clientUuid, client) + } + client.containers.add(uuid) + + const record = await this.getContainer(uuid, request, [clientUuid]) + if (record.endpoint instanceof Promise) { + return await record.endpoint + } + return record.endpoint + } + + async getContainer ( + uuid: ContainerUuid, + request: ContainerRequest, + clients: ClientUuid[] + ): Promise { + const existing = this._containers.get(uuid) + if (existing !== undefined) { + const agent = this._agents.get(existing) + const containerImpl = agent?.containers?.get(uuid) + if (containerImpl !== undefined) { + if (!(containerImpl.endpoint instanceof Promise)) { + this._orphanedContainers.delete(containerImpl.endpoint) + } + for (const cl of clients) { + containerImpl.clients.add(cl) + } + return containerImpl + } + } + // Select agent using round/robin and register it in agent + const agent = Array.from(this._agents.values())[++this.idx % this._agents.size] + + const record: ContainerRecordImpl = { + record: { + uuid, + agentId: agent.api.uuid, + kind: request.kind, + lastVisit: this.tickManager.now(), + endpoint: '' as ContainerEndpointRef, // Placeholder, will be updated later + labels: request.labels, + extra: request.extra + }, + clients: new Set(clients), + endpoint: agent.api.get(uuid, request) + } + agent.containers.set(uuid, record) + this._containers.set(uuid, 
agent.api.uuid) + + // Wait for endpoint to be established + try { + const endpointRef = await record.endpoint + record.endpoint = endpointRef + this.eventQueue.push({ + added: [record.record], + deleted: [], + updated: [] + }) + return record + } catch (err: any) { + this._containers.delete(uuid) // Remove from active container registry + throw new Error(`Failed to get endpoint for container ${uuid}: ${err.message}`) + } + } + + async release (client: ClientUuid, uuid: ContainerUuid): Promise { + const _client = this._clients.get(client) + _client?.containers.delete(uuid) + + const existing = this._containers.get(uuid) + if (existing !== undefined) { + const agent = this._agents.get(existing) + const containerImpl = agent?.containers?.get(uuid) + if (containerImpl !== undefined) { + containerImpl.clients.delete(client) + if (containerImpl.clients.size === 0 && !(containerImpl.endpoint instanceof Promise)) { + this._orphanedContainers.set(containerImpl.endpoint, containerImpl) + } + } + } + } + + async terminate (container: ContainerRecordImpl): Promise { + this._containers.delete(container.record.uuid) // Remove from active container registry + this.eventQueue.push({ + added: [], + deleted: [container.record], + updated: [] + }) + const agent = this._agents.get(container.record.agentId) + agent?.containers.delete(container.record.uuid) + + let endpoint = container.endpoint + if (endpoint instanceof Promise) { + endpoint = await endpoint + } + await agent?.api.terminate(endpoint) + } + + /** + * Mark an agent as alive (updates lastSeen timestamp) + */ + ping (id: AgentUuid | ClientUuid): void { + const agent = this._agents.get(id as AgentUuid) + if (agent != null) { + agent.lastSeen = this.tickManager.now() + } + + // Agent could be also a client. + const client = this._clients.get(id as ClientUuid) + if (client != null) { + client.lastSeen = this.tickManager.now() + } + } + + async handleTimeout (client: ClientUuid): Promise { + // Handle outdated clients + const clientRecord = this._clients.get(client) + if (clientRecord !== undefined) { + for (const uuid of clientRecord.containers) { + await this.release(client, uuid) + } + } + this._clients.delete(client) + } + + /** + * Perform periodic health check of all registered agents + */ + private async checkAlive (): Promise { + const now = this.tickManager.now() + const deadAgents: AgentUuid[] = [] + + // Check each agent's last seen time + for (const [agentId, agentRecord] of this._agents.entries()) { + const timeSinceLastSeen = now - agentRecord.lastSeen + + if (timeSinceLastSeen > timeouts.aliveTimeout * 1000) { + console.warn(`Agent ${agentId} has been inactive for ${Math.round(timeSinceLastSeen / 1000)}s, marking as dead`) + deadAgents.push(agentId) + } + } + + // Remove dead agents and their containers + for (const agentId of deadAgents) { + await this.processDeadAgent(agentId) + } + + // Handle termination of orphaned containers + for (const container of [...this._orphanedContainers.values()]) { + void this.terminate(container).catch((err) => { + console.error(`Failed to terminate orphaned container ${container.record.uuid}: ${err.message}`) + }) + } + } + + /** + * Remove a dead agent and clean up its containers + */ + private async processDeadAgent (agentId: AgentUuid): Promise { + const agent = this._agents.get(agentId) + if (agent == null) { + return + } + + console.log(`Removing dead agent ${agentId} and its ${agent.containers.size} containers`) + + // Collect containers to remove + const affectedContainers: ContainerRecordImpl[] 
= [] + for (const [containerId, containerRecord] of agent.containers.entries()) { + affectedContainers.push(containerRecord) + this._containers.delete(containerId) + } + + // Remove agent + this._agents.delete(agentId) + + const containerEvent: ContainerEvent = { + added: [], + deleted: [], + updated: [] + } + + // We need to add requests for all used containers + for (const container of affectedContainers) { + if (!this._orphanedContainers.delete(container.record.endpoint)) { + // Container is used, we need to re-create it + const containerImpl = await this.getContainer( + container.record.uuid, + { kind: container.record.kind, extra: container.record.extra, labels: container.record.labels }, + [...Array.from(container.clients)] + ) + let endpoint = containerImpl.endpoint + if (endpoint instanceof Promise) { + endpoint = await endpoint + } + containerEvent.updated.push({ ...container.record, endpoint }) + } else { + containerEvent.deleted.push(container.record) + } + } + if (affectedContainers.length > 0) { + this.eventQueue.push(containerEvent) + } + } +} diff --git a/network/core/src/node/node.ts b/network/core/src/node/node.ts new file mode 100644 index 00000000000..c2878e35b8b --- /dev/null +++ b/network/core/src/node/node.ts @@ -0,0 +1,291 @@ +// import type { ClientBroadcast } from '../api/client' +// import type { NodeData, NodeDiscovery, WorkspaceDiscovery } from '../api/discovery' +// import type { Node, NodeAskOptions, NodeFactory, NodeManager, Workspace, WorkspaceFactory } from '../api/node' +// import type { Request, RequestAkn, Response, ResponseValue } from '../api/request' +// import { timeouts } from '../api/timeouts' +// import type { AccountUuid, NodeUuid, WorkspaceUuid } from '../api/types' +// import type { TickManager } from '../api/utils' +// import { groupByArray } from '../utils' + +// class WorkspaceSession { +// workspace: Workspace | Promise +// tick: number +// lastUse: number +// state: 'ready' | 'suspended' + +// constructor (workspace: Workspace | Promise, tick: number, lastUse: number, state: 'ready' | 'suspended') { +// this.workspace = workspace +// this.tick = tick +// this.lastUse = lastUse +// this.state = state +// } + +// async getWorkspace (): Promise { +// if (this.workspace instanceof Promise) { +// this.workspace = await this.workspace +// } +// return this.workspace +// } + +// async suspend (): Promise { +// const ws = await this.getWorkspace() +// if (this.state === 'ready') { +// this.workspace = ws.suspend().then(() => ws) +// await this.workspace +// this.state = 'suspended' +// } +// } + +// async resume (): Promise { +// const ws = await this.getWorkspace() +// if (this.state === 'suspended') { +// this.workspace = ws.resume().then(() => ws) +// await this.workspace +// this.state = 'ready' +// } +// } +// } + +// export class NodeImpl implements Node { +// workspaces: Record = {} +// constructor ( +// readonly _id: NodeUuid, +// readonly workspaceFactory: WorkspaceFactory, +// readonly workspaceDiscovery: WorkspaceDiscovery, +// readonly discovery: NodeManager, +// readonly tickManager: TickManager, +// readonly onClose?: () => Promise +// ) { +// this.tickManager.register(async (tick, tps) => { +// this.handleWorkspaceClose(tick, tps) +// }) +// } + +// onClientBroadcast?: ClientBroadcast + +// handleWorkspaceClose (tick: number, tps: number): void { +// const now = this.tickManager.now() +// for (const [wsid, { workspace, tick: wstick, lastUse }] of Object.entries(this.workspaces)) { +// if (tick % tps === wstick && !(workspace 
instanceof Promise)) { +// if (now - lastUse > timeouts.closeWorkspaceTimeout) { +// // Not used for 5 minutes, close it +// // eslint-disable-next-line @typescript-eslint/no-dynamic-delete +// delete this.workspaces[wsid as WorkspaceUuid] +// void workspace.suspend().catch() +// } +// } +// } +// } + +// async workspace (workspaceId: WorkspaceUuid): Promise { +// let workspace = this.workspaces[workspaceId] +// if (workspace == null) { +// // Create and store the promise immediately to prevent race conditions +// const wrk = this.workspaceFactory(workspaceId) +// const tick = this.tickManager.nextHash() +// workspace = new WorkspaceSession(wrk, tick, this.tickManager.now(), 'ready') +// this.workspaces[workspaceId] = workspace +// } +// if (workspace.state === 'suspended') { +// await workspace.resume() +// } + +// return workspace +// } + +// async ask(req: Request, options?: NodeAskOptions): Promise { +// const result: RequestAkn = { +// workspaces: {} +// } +// const workspaces = options?.target ?? (await this.workspaceDiscovery.byAccount(req.account)) + +// const byNode = await this.groupWorkspaces(workspaces) + +// // For self workspaces we need to resolve child workspaces. +// await this.includeChildWorkspaces(byNode, options) + +// const promises: Array> = [] +// for (const [node, _workspaces] of byNode.entries()) { +// const workspaces = _workspaces.filter((ws) => req.workspaces[ws] == null) +// if (workspaces.length === 0) { +// continue +// } + +// if (node === this._id) { +// const localWorkspaces = this.getFilteredWorkspaces(workspaces, options) + +// for (const ws of localWorkspaces) { +// req.workspaces[ws] = this._id +// result.workspaces[ws] = this._id +// } + +// void this.askLocal(req, localWorkspaces).catch((err) => { +// console.error('failed to ask local workspaces', err) +// }) +// } else { +// const wrk = await this.discovery.node(node) +// promises.push(this.askTo(req, wrk, workspaces, result, options)) +// } +// } +// await Promise.all(promises) +// return result +// } + +// private getFilteredWorkspaces (workspaces: WorkspaceUuid[], options: NodeAskOptions | undefined): WorkspaceUuid[] { +// let localWorkspaces = workspaces +// if (options?.workspace !== undefined) { +// const wsSet = new Set(options?.workspace) +// localWorkspaces = workspaces.filter((it) => wsSet.has(it)) +// } +// return localWorkspaces +// } + +// private async groupWorkspaces (workspaces: WorkspaceUuid[]): Promise> { +// const byNode = new Map() +// for (const workspace of workspaces) { +// const node = await this.discovery.byWorkspace(workspace) +// byNode.set(node, (byNode.get(node) ?? []).concat(workspace)) +// } +// return byNode +// } + +// private async includeChildWorkspaces ( +// byNode: Map, +// options?: NodeAskOptions +// ): Promise { +// const selfWorkspace = byNode.get(this._id) ?? [] +// if (selfWorkspace.length > 0) { +// let optionsSet: Set | undefined +// if (options?.workspace !== undefined) { +// // We need to enhance options to include child workspaces +// optionsSet = new Set(options.workspace) +// } +// for (const ws of selfWorkspace) { +// const childWs = await this.workspaceDiscovery.byWorkspace(ws) +// if (options?.workspace !== undefined && optionsSet !== undefined && optionsSet.has(ws)) { +// options.workspace.push(...childWs) +// } +// for (const cws of childWs) { +// const node = await this.discovery.byWorkspace(cws) +// byNode.set(node, (byNode.get(node) ?? 
[]).concat(cws)) +// } +// } +// } +// } + +// async askTo( +// req: Request, +// wrk: Node, +// workspaces: WorkspaceUuid[], +// result: RequestAkn, +// options?: NodeAskOptions +// ): Promise { +// const response = await wrk.ask(req, { ...options, target: workspaces }) +// const localWorkspaces = new Set( +// this.getFilteredWorkspaces(Object.keys(response.workspaces) as WorkspaceUuid[], options) +// ) +// if (localWorkspaces.size > 0) { +// for (const [ws, nodeId] of Object.entries(response.workspaces)) { +// if (!localWorkspaces.has(ws as WorkspaceUuid)) { +// continue +// } +// result.workspaces[ws as WorkspaceUuid] = nodeId +// req.workspaces[ws as WorkspaceUuid] = nodeId +// } +// } +// } + +// async askLocal(req: Request, workspaces: WorkspaceUuid[]): Promise { +// const targetNode = await this.discovery.byAccount(req.account) +// const target = targetNode === this._id ? this : await this.discovery.node(targetNode) +// for (const ws of workspaces) { +// const worker = await this.workspace(ws) +// const data = await (await worker.getWorkspace()).ask(req) +// await target.broadcast([ +// { +// _id: req._id, +// account: req.account, +// workspaceId: ws, +// nodeId: this._id, +// data +// } +// ]) +// } +// } + +// async modify(workspaceId: WorkspaceUuid, req: Request): Promise> { +// const wsNode = await this.discovery.byWorkspace(workspaceId) + +// if (wsNode === this._id) { +// const wrk = await this.workspace(workspaceId) +// return await (await wrk.getWorkspace()).modify(req) +// } +// const node = await this.discovery.node(wsNode) +// return await node.modify(workspaceId, req) +// } + +// async broadcast(req: Array>): Promise { +// const byAccount = groupByArray(req, (it) => it.account) +// for (const [account, values] of byAccount.entries()) { +// const nodeId = await this.discovery.byAccount(account) +// if (this._id === nodeId) { +// // Broadcast to local clients +// await this.onClientBroadcast?.(account, values) +// } else { +// // Broadcast to remote node +// const wrk = await this.discovery.node(nodeId) +// await wrk.broadcast(values) +// } +// } +// } + +// async ping (workspaces: WorkspaceUuid[], processChildren: boolean): Promise { +// const wsSet = new Set(workspaces) + +// if (processChildren) { +// const toProcess = Array.from(wsSet) + +// while (toProcess.length > 0) { +// const ws = toProcess.pop() +// if (ws === undefined) { +// break +// } +// const childWs = await this.workspaceDiscovery.byWorkspace(ws) +// for (const cws of childWs) { +// if (!wsSet.has(cws)) { +// wsSet.add(cws) +// toProcess.push(cws) +// } +// } +// } +// } + +// const byNode = await this.groupWorkspaces(Array.from(wsSet)) + +// for (const [node, workspaces] of byNode.entries()) { +// if (node === this._id) { +// // Ping local workspaces +// for (const ws of workspaces) { +// const wrk = await this.workspace(ws) +// wrk.lastUse = this.tickManager.now() +// } +// } else { +// const wrk = await this.discovery.node(node) +// await wrk.ping(workspaces, false) +// } +// } +// } + +// async close (): Promise { +// for (const { workspace } of Object.values(this.workspaces)) { +// if (workspace instanceof Promise) { +// await workspace.then(async (w) => { +// await w.close() +// }) +// } else { +// await workspace.close() +// } +// } +// await this.onClose?.() +// } +// } diff --git a/network/core/src/node/session.ts b/network/core/src/node/session.ts new file mode 100644 index 00000000000..85ec4b5b74c --- /dev/null +++ b/network/core/src/node/session.ts @@ -0,0 +1,142 @@ +// import { v4 as uuid } 
from 'uuid' +// import { type AskOptions, type Client, type SessionManager } from '../api/client' +// import type { NodeDiscovery, WorkspaceDiscovery } from '../api/discovery' +// import type { Node } from '../api/node' +// import type { Request, RequestAkn, RequestId, Response, ResponseValue } from '../api/request' +// import { timeouts } from '../api/timeouts' +// import type { AccountUuid, WorkspaceUuid } from '../api/types' +// import type { TickManager } from '../api/utils' +// import type { NodeImpl } from './node' + +// interface RequestData { +// request: Request +// time: number +// responses: Array> +// akn: RequestAkn | undefined +// promise: Promise> + +// resolve: (value: ResponseValue) => void +// reject: (err: Error) => void +// } +// class SessionImpl implements Client { +// requests = new Map>() + +// onClose?: () => void +// onBroadcast?: ((response: Response) => void) | undefined + +// lastOp: number = performance.now() + +// constructor ( +// readonly account: AccountUuid, +// readonly sessionId: string, +// readonly localNode: Node, +// readonly tick: number +// ) {} + +// async ask(req: T, options?: AskOptions): Promise> { +// this.lastOp = performance.now() + +// const requestId = uuid() as RequestId +// const request: Request = { +// _id: requestId, +// account: this.account, +// data: req, +// workspaces: {} +// } + +// let resolveRequest = (value: ResponseValue): void => {} +// let rejectRequest = (_: Error): void => {} + +// const rdata: RequestData = { +// request, +// time: Date.now(), +// akn: undefined, +// responses: [], +// resolve: () => {}, +// reject: () => {}, +// promise: new Promise>((resolve, reject) => { +// resolveRequest = resolve as RequestData['resolve'] +// rejectRequest = reject as RequestData['reject'] +// }) +// } +// this.requests.set(requestId, rdata) +// rdata.resolve = resolveRequest +// rdata.reject = rejectRequest + +// rdata.akn = await this.localNode.ask(request, { ...(options ?? {}), target: undefined }) + +// this.checkResponses(rdata, rdata.responses) + +// return await rdata.promise +// } + +// async modify(workspaceId: WorkspaceUuid, req: T): Promise> { +// this.lastOp = performance.now() + +// const requestId = uuid() as RequestId +// const request: Request = { +// _id: requestId, +// account: this.account, +// data: req, +// workspaces: {} +// } +// return await this.localNode.modify(workspaceId, request) +// } + +// checkResponses (rdata: RequestData, responses: Array>): void { +// for (const response of responses) { +// if (response._id == null) { +// continue +// } +// if (rdata.akn?.workspaces[response.workspaceId] !== undefined) { +// // eslint-disable-next-line @typescript-eslint/no-dynamic-delete +// delete rdata.akn.workspaces[response.workspaceId] +// } +// } +// if (rdata.akn !== undefined && Object.keys(rdata.akn.workspaces).length === 0) { +// rdata.responses.sort((a, b) => a.workspaceId.localeCompare(b.workspaceId)) + +// // Flatten all response values properly +// const allValues = rdata.responses.flatMap((r) => r.data.value) +// const totalCount = rdata.responses.reduce((sum, r) => sum + r.data.total, 0) + +// rdata.resolve({ value: allValues, total: totalCount }) +// this.requests.delete(rdata.request._id) +// } +// } + +// handleResponse(responses: Array>): void { +// for (const response of responses) { +// if (response._id == null) { +// // This is a broadcast response, call the callback if it exists. 
+// this.onBroadcast?.(response) +// continue +// } + +// const rdata = this.requests.get(response._id) +// if (rdata == null) { +// console.warn('Response for unknown request', response._id, response) +// continue +// } + +// rdata.responses.push(response) + +// this.checkResponses(rdata, [response]) +// } +// } + +// close (): void { +// this.onClose?.() +// } + +// async retryIfNeeded (time: number): Promise { +// for (const [, rdata] of this.requests.entries()) { +// if (time - rdata.time > timeouts.retryTimeout) { +// const wsretry = Array.from(Object.keys(rdata.akn?.workspaces ?? {})) as WorkspaceUuid[] +// if (wsretry.length > 0) { +// await this.localNode.ask(rdata.request, { target: wsretry }) +// } +// } +// } +// } +// } diff --git a/network/core/src/utils.ts b/network/core/src/utils.ts new file mode 100644 index 00000000000..2cb187829de --- /dev/null +++ b/network/core/src/utils.ts @@ -0,0 +1,88 @@ +import type { TickHandler, TickManager } from './api/utils' + +export function groupByArray (array: T[], keyProvider: (item: T) => K): Map { + const result = new Map() + + array.forEach((item) => { + const key = keyProvider(item) + + if (!result.has(key)) { + result.set(key, [item]) + } else { + result.get(key)?.push(item) + } + }) + + return result +} + +/** + * Handles a time unification and inform about ticks. + */ +export class TickManagerImpl implements TickManager { + handlers: [TickHandler, number, number][] = [] + + hashCounter: number = 0 + + _tick: number = 0 + + constructor (readonly tps: number) { + if (tps > 1000 || tps < 1) { + throw new Error('Ticks per second has an invalid value: must be >= 1 && <= 1000') + } + } + + now (): number { + // Use performance.now() when available, otherwise fall back to Date.now() + // performance is available in recent Node versions, but guard for portability. + return (globalThis as any).performance?.now?.() ?? Date.now() + } + + nextHash (): number { + // Use post-increment so first hash can be 0, and avoid negative modulo issues. 
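+    // Handlers registered via register() receive successive offsets, so
+    // periodic work is spread across different ticks rather than all firing
+    // on the same tick.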
+ return this.hashCounter++ % this.tps + } + + register (handler: TickHandler, interval: number): void { + if (!Number.isFinite(interval) || interval < 1) { + throw new Error('Interval must be a finite number >= 1 (seconds)') + } + const hash = this.nextHash() + this.handlers.push([handler, hash, interval]) + } + + async tick (): Promise { + this._tick++ + for (const [h, hash, interval] of this.handlers) { + try { + if (this.isMe(hash, interval)) { + await h() + } + } catch (err: any) { + console.error(`Error in tick handler for tick ${this._tick}:`, err) + } + } + } + + isMe (tickId: number, seconds: number): boolean { + if (!Number.isFinite(seconds) || seconds < 1) return false + // triggers once every (tps * seconds) ticks at the offset `tickId % tps` + return this._tick % (this.tps * seconds) === tickId % this.tps + } + + stop = () => {} + + start (): void { + const to = setInterval( + () => { + this.tick().catch((err) => { + console.error('Error in tick manager:', err) + }) + }, + Math.round(1000 / this.tps) + ) + this.stop = () => { + clearInterval(to) + } + } +} diff --git a/network/core/tsconfig.json b/network/core/tsconfig.json new file mode 100644 index 00000000000..c6a877cf6c3 --- /dev/null +++ b/network/core/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "./node_modules/@hcengineering/platform-rig/profiles/node/tsconfig.json", + + "compilerOptions": { + "rootDir": "./src", + "outDir": "./lib", + "declarationDir": "./types", + "tsBuildInfoFile": ".build/build.tsbuildinfo" + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "lib", "dist", "types", "bundle"] +} \ No newline at end of file diff --git a/network/docs/Schema.png b/network/docs/Schema.png new file mode 100644 index 00000000000..bcdbb8d42a4 Binary files /dev/null and b/network/docs/Schema.png differ diff --git a/network/docs/api-reference.md b/network/docs/api-reference.md new file mode 100644 index 00000000000..7507e1a6c57 --- /dev/null +++ b/network/docs/api-reference.md @@ -0,0 +1,950 @@ +# Huly Network API Reference + +Complete API reference for the Huly Virtual Network, including interfaces, types, and usage examples. + +## 📚 Table of Contents + +- [Core Interfaces](#core-interfaces) +- [Node Management](#node-management) +- [Workspace Operations](#workspace-operations) +- [Discovery Services](#discovery-services) +- [Session Management](#session-management) +- [Request/Response Types](#requestresponse-types) +- [Error Handling](#error-handling) +- [Usage Examples](#usage-examples) + +## 🔌 Core Interfaces + +### Node Interface + +The primary interface for network nodes that handle distributed operations. 
+ +```typescript +interface Node { + /** + * Unique identifier for the node + */ + _id: NodeUuid + + /** + * Send a query request to the node + * @param req - The request object + * @param options - Optional request configuration + * @returns Promise resolving to request acknowledgment + */ + ask: (req: Request, options?: NodeAskOptions) => Promise + + /** + * Send a modification request to a specific workspace + * @param workspaceId - Target workspace identifier + * @param req - The modification request + * @returns Promise resolving to the response value + */ + modify: (workspaceId: WorkspaceUuid, req: Request) => Promise> + + /** + * Health check for workspaces + * @param workspaces - Array of workspace identifiers to ping + * @param processChildren - Whether to include child workspaces + */ + ping: (workspaces: WorkspaceUuid[], processChildren: boolean) => Promise + + /** + * Inform clients about some request/Response + * @param req - Array of responses to broadcast + */ + broadcast: (req: Array>) => Promise + + /** + * Gracefully close the node and cleanup resources + */ + close: () => Promise +} +``` + +### Workspace Interface + +Interface for individual workspace instances within nodes. + +```typescript +interface Workspace { + /** + * Unique identifier for the workspace + */ + _id: WorkspaceUuid + + /** + * Execute a query on the workspace + * @param req - The query request + * @returns Promise resolving to the query result + */ + ask: (req: Request) => Promise> + + /** + * Execute a modification on the workspace + * @param req - The modification request + * @returns Promise resolving to the modification result + */ + modify: (req: Request) => Promise> + + /** + * Suspend any system resources, be ready for a resume before any new requests. + */ + suspend: () => Promise + + /** + * A restore state and be able to respond for user actions. + */ + resume: () => Promise + + /** + * Permanently close the workspace and cleanup all resources + */ + close: () => Promise +} +``` + +## 🏗️ Node Management + +### NodeManager Interface + +Central manager for node discovery and access. + +```typescript +interface NodeManager extends NodeDiscovery { + /** + * Get a node instance by its identifier + * @param node - Node identifier + * @returns Promise resolving to the node instance + */ + node: (node: NodeUuid) => Promise +} +``` + +### NodeFactory Type + +Factory function for creating node instances. + +```typescript +type NodeFactory = (node: NodeUuid) => Promise +``` + +### NodeAskOptions + +Extended options for node query operations. + +```typescript +interface NodeAskOptions extends AskOptions { + /** + * Specific workspaces to target for the request + * If not specified, all accessible workspaces are queried + */ + target?: WorkspaceUuid[] +} +``` + +## 🏢 Workspace Operations + +### WorkspaceFactory Type + +Factory function for creating workspace instances. 
+ +```typescript +type WorkspaceFactory = (workspaceId: WorkspaceUuid) => Promise +``` + +### Workspace Lifecycle + +Workspaces follow a specific lifecycle pattern: + +```mermaid +stateDiagram-v2 + [*] --> Created: WorkspaceFactory() + Created --> Active: First Request + Active --> Suspended: suspend() + Suspended --> Active: resume() + Active --> Closed: close() + Suspended --> Closed: close() + Closed --> [*] +``` + +### Example Workspace Implementation + +```typescript +class WorkspaceImpl implements Workspace { + constructor(public readonly _id: WorkspaceUuid, private pipeline: Pipeline, private config: WorkspaceConfig) {} + + async ask(req: Request): Promise> { + try { + // Validate request + await this.validateRequest(req) + + // Execute query through pipeline + const result = await this.pipeline.ask(req) + + // Transform and return result + return this.transformResponse(result) + } catch (error) { + throw new NetworkError('Query failed', { cause: error }) + } + } + + async modify(req: Request): Promise> { + try { + // Start transaction + const transaction = await this.pipeline.startTransaction() + + // Execute modification + const result = await transaction.modify(req) + + // Commit transaction + await transaction.commit() + + return this.transformResponse(result) + } catch (error) { + // Rollback on error + await transaction?.rollback() + throw new NetworkError('Modification failed', { cause: error }) + } + } + + async suspend(): Promise { + await this.pipeline.suspend() + this.config.suspended = true + } + + async resume(): Promise { + await this.pipeline.resume() + this.config.suspended = false + } + + async close(): Promise { + await this.pipeline.close() + } +} +``` + +## 🔍 Discovery Services + +### NodeDiscovery Interface + +Service for discovering and managing node topology. + +```typescript +interface NodeDiscovery { + /** + * Get the node responsible for a specific workspace + * @param workspace - Workspace identifier + * @returns Node identifier + */ + byWorkspace: (workspace: WorkspaceUuid) => Promise + + /** + * Get the node responsible for a specific account + * @param account - Account identifier + * @returns Node identifier + */ + byAccount: (account: AccountUuid) => Promise + + /** + * Get all available nodes + * @returns Iterable of node identifiers + */ + list: () => Iterable + + /** + * Get statistics/metadata for a specific node + * @param node - Node identifier + * @returns Node metadata + */ + stats: (node: NodeUuid) => Promise +} + +/** + * Node metadata type + */ +type NodeData = Record +``` + +### WorkspaceDiscovery Interface + +Service for discovering workspace locations and relationships. + +````typescript +interface WorkspaceDiscovery { + /** + * Get all workspaces accessible by an account + * @param account - Account identifier + * @returns Array of workspace identifiers + */ + byAccount: (account: AccountUuid) => Promise + + /** + * Get child workspaces of a parent workspace + * @param workspace - Parent workspace identifier + * @returns Array of child workspace identifiers + */ + byWorkspace: (workspace: WorkspaceUuid) => Promise +} + +### AccountDiscovery Interface + +Service for discovering accounts associated with workspaces. 
+ +```typescript +interface AccountDiscovery { + /** + * Get all accounts that have access to a specific workspace + * @param workspace - Workspace identifier + * @returns Array of account identifiers + */ + byWorkspace: (workspace: WorkspaceUuid) => Promise +} +```` + +## 🚀 Transport Layer + +### ClientTransport Interface + +Interface for client-side transport communication. + +```typescript +interface ClientTransport { + /** + * Send a request to a specific client + * @param clientId - Account identifier + * @param reqId - Request identifier + * @param body - Request body + * @returns Promise resolving to response + */ + request: (clientId: AccountUuid, reqId: RequestId, body: any) => Promise + + /** + * Subscribe to messages for an account + * @param account - Account identifier + */ + subscribe: (account: AccountUuid) => void + + /** + * Unsubscribe from messages for an account + * @param account - Account identifier + */ + unsubscribe: (account: AccountUuid) => void + + /** + * Close the transport connection + */ + close: () => Promise +} +``` + +### ServerTransport Interface + +Interface for server-side transport communication. + +```typescript +interface ServerTransport { + /** + * Node identifier for this transport + */ + nodeId: NodeUuid + + /** + * Send a request to a target node + * @param target - Target node identifier + * @param body - Request body + * @returns Promise resolving to response + */ + request: (target: NodeUuid, body: any) => Promise + + /** + * Send a message to a target node + * @param target - Target node identifier + * @param reqId - Request identifier (optional) + * @param body - Message body + */ + send: (target: NodeUuid, reqId: RequestId | undefined, body: any) => Promise + + /** + * Close the transport connection + */ + close: () => Promise +} +``` + +### Static Discovery Implementation + +```typescript +class StaticNodeDiscovery implements NodeDiscovery { + private nodes: Map + private accountHashRing: ConsistentHashRing + + constructor(nodes: Array<[NodeUuid, NodeMetadata]>) { + this.nodes = new Map(nodes) + this.accountHashRing = new ConsistentHashRing(Array.from(this.nodes.keys())) + } + + async getAccountNode(account: AccountUuid): Promise { + return this.accountHashRing.getNode(account) + } + + async getNodes(): Promise { + return Array.from(this.nodes.keys()) + } + + async registerNode(node: NodeUuid, metadata: NodeMetadata): Promise { + this.nodes.set(node, metadata) + this.accountHashRing.addNode(node) + } + + async unregisterNode(node: NodeUuid): Promise { + this.nodes.delete(node) + this.accountHashRing.removeNode(node) + } +} +``` + +## 👥 Session Management + +### SessionManager Interface + +Central coordinator for client sessions and workspace access. + +```typescript +interface SessionManager { + /** + * Register a new client session + * @param account - Account identifier + * @param sessionId - Session identifier + * @returns Client interface for the session + */ + register: (account: AccountUuid, sessionId: string) => Promise + + /** + * Unregister and close a client session + * @param sessionId - Session identifier + */ + unregister: (sessionId: string) => Promise + + /** + * Close the session manager and all active sessions + */ + close: () => void +} +``` + +### Client Interface + +Interface for client sessions to interact with the network. 
+ +```typescript +interface Client { + /** + * Account associated with this client + */ + account: AccountUuid + + /** + * Unique session identifier + */ + sessionId: string + + /** + * Send a query request + * @param req - The request data + * @param options - Optional request configuration + * @returns Promise resolving to the response + */ + ask: (req: T, options?: AskOptions) => Promise> + + /** + * Send a modification request to a specific workspace + * @param workspaceId - Target workspace identifier + * @param req - The modification request data + * @returns Promise resolving to the response + */ + modify: (workspaceId: WorkspaceUuid, req: T) => Promise> + + /** + * Callback for handling broadcast messages + */ + onBroadcast?: (response: Response) => void + + /** + * Callback for handling session close + */ + onClose?: () => void +} +``` + +## 📨 Request/Response Types + +### Core Types + +```typescript +/** + * Unique identifier types + */ +type WorkspaceUuid = string & { __workspaceUuid: true } +type AccountUuid = string & { __accountUuid: true } +type NodeUuid = string & { __nodeUuid: true } + +/** + * Request identifier type + */ +type RequestId = string & { __requestId: true } + +/** + * Request structure + */ +interface Request { + _id: RequestId + account: AccountUuid + + // Workspace filter + workspace?: WorkspaceUuid | WorkspaceUuid[] + + workspaces: Record // A list of already processed workspaces. + data: T +} + +/** + * Response structure + */ +interface Response { + _id: RequestId | undefined + account: AccountUuid + + nodeId: NodeUuid + workspaceId: WorkspaceUuid + data: ResponseValue +} + +/** + * Response value wrapper + */ +interface ResponseValue { + value: T[] + total: number +} + +/** + * Request acknowledgment + */ +interface RequestAkn { + // A list of nodes we need to retrieve data from, or retry to ask again if required. 
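+  // Entries are removed as the corresponding workspace responses arrive;
+  // workspaces still listed after a timeout are candidates for a retry.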
+ workspaces: Record +} +``` + +### Request Options + +```typescript +interface AskOptions { + /** + * Specific workspaces to target for the request + */ + workspace?: WorkspaceUuid[] +} +``` + +### Node Metadata + +```typescript +interface NodeMetadata { + /** + * Geographic region where the node is located + */ + region: string + + /** + * Processing capacity of the node + */ + capacity: number + + /** + * Network endpoints for the node + */ + endpoints?: { + internal: string + external: string + } + + /** + * Node status information + */ + status?: { + healthy: boolean + lastSeen: number + version: string + } +} +``` + +## ❌ Error Handling + +### NetworkError Class + +```typescript +class NetworkError extends Error { + constructor(message: string, public code?: string, public details?: any) { + super(message) + this.name = 'NetworkError' + } +} +``` + +### Error Types + +```typescript +enum NetworkErrorCode { + // Connection errors + CONNECTION_FAILED = 'CONNECTION_FAILED', + CONNECTION_TIMEOUT = 'CONNECTION_TIMEOUT', + CONNECTION_REFUSED = 'CONNECTION_REFUSED', + + // Authentication errors + AUTHENTICATION_FAILED = 'AUTHENTICATION_FAILED', + AUTHORIZATION_FAILED = 'AUTHORIZATION_FAILED', + SESSION_EXPIRED = 'SESSION_EXPIRED', + + // Request errors + INVALID_REQUEST = 'INVALID_REQUEST', + REQUEST_TIMEOUT = 'REQUEST_TIMEOUT', + RATE_LIMITED = 'RATE_LIMITED', + + // Workspace errors + WORKSPACE_NOT_FOUND = 'WORKSPACE_NOT_FOUND', + WORKSPACE_UNAVAILABLE = 'WORKSPACE_UNAVAILABLE', + WORKSPACE_SUSPENDED = 'WORKSPACE_SUSPENDED', + + // Node errors + NODE_NOT_FOUND = 'NODE_NOT_FOUND', + NODE_UNAVAILABLE = 'NODE_UNAVAILABLE', + NODE_OVERLOADED = 'NODE_OVERLOADED', + + // System errors + INTERNAL_ERROR = 'INTERNAL_ERROR', + SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE', + MAINTENANCE_MODE = 'MAINTENANCE_MODE' +} +``` + +### Error Handling Patterns + +```typescript +// Retry with exponential backoff +async function retryWithBackoff( + operation: () => Promise, + maxRetries: number = 3, + baseDelay: number = 1000 +): Promise { + let lastError: Error + + for (let attempt = 0; attempt <= maxRetries; attempt++) { + try { + return await operation() + } catch (error) { + lastError = error + + if (attempt === maxRetries) { + throw new NetworkError('Max retries exceeded', 'MAX_RETRIES', { + attempts: attempt + 1, + lastError + }) + } + + // Exponential backoff with jitter + const delay = baseDelay * Math.pow(2, attempt) + Math.random() * 1000 + await new Promise((resolve) => setTimeout(resolve, delay)) + } + } + + throw lastError +} + +// Circuit breaker pattern +class CircuitBreaker { + private failures: number = 0 + private lastFailTime: number = 0 + private state: 'closed' | 'open' | 'half-open' = 'closed' + + constructor(private failureThreshold: number = 5, private timeout: number = 60000) {} + + async execute(operation: () => Promise): Promise { + if (this.state === 'open') { + if (Date.now() - this.lastFailTime > this.timeout) { + this.state = 'half-open' + } else { + throw new NetworkError('Circuit breaker is open', 'CIRCUIT_OPEN') + } + } + + try { + const result = await operation() + this.onSuccess() + return result + } catch (error) { + this.onFailure() + throw error + } + } + + private onSuccess(): void { + this.failures = 0 + this.state = 'closed' + } + + private onFailure(): void { + this.failures++ + this.lastFailTime = Date.now() + + if (this.failures >= this.failureThreshold) { + this.state = 'open' + } + } +} +``` + +## 💡 Usage Examples + +### Basic Client Usage + +```typescript +import { 
SessionManagerImpl, StaticNodeDiscovery, StaticWorkspaceDiscovery } from '@hcengineering/network' + +// Setup discovery services +const nodeDiscovery = new StaticNodeDiscovery([ + ['node1', { region: 'us-east', capacity: 100 }], + ['node2', { region: 'us-west', capacity: 150 }] +]) + +const workspaceDiscovery = new StaticWorkspaceDiscovery({ + user1: ['workspace1', 'workspace2'], + user2: ['workspace3'] +}) + +// Create session manager +const sessionManager = new SessionManagerImpl(nodeFactory, operationHandler, workspaceDiscovery, nodeDiscovery) + +// Register client and perform operations +async function example() { + // Register a new client session + const client = await sessionManager.register('user1' as AccountUuid, 'session1') + + // Set up broadcast handler + client.onBroadcast = (response) => { + console.log('Received broadcast:', response) + } + + // Perform a query + const queryResult = await client.ask( + { + method: 'findDocuments', + collection: 'tasks', + filter: { status: 'active' } + }, + { + timeout: 5000, + useCache: true + } + ) + + // Perform a modification + const modifyResult = await client.modify('workspace1' as WorkspaceUuid, { + method: 'updateDocument', + collection: 'tasks', + id: 'task123', + updates: { status: 'completed' } + }) + + console.log('Query result:', queryResult) + console.log('Modify result:', modifyResult) +} +``` + +### Advanced Node Implementation + +```typescript +class AdvancedNode implements Node { + private workspaces: Map = new Map() + private circuitBreaker = new CircuitBreaker() + + constructor(public readonly _id: NodeUuid, private workspaceFactory: WorkspaceFactory, private config: NodeConfig) {} + + async ask(req: Request, options?: NodeAskOptions): Promise { + const requestId = generateId() + + try { + // Process request with circuit breaker + await this.circuitBreaker.execute(async () => { + const workspaces = options?.target || (await this.getAvailableWorkspaces()) + + // Distribute query across target workspaces + const promises = workspaces.map(async (workspaceId) => { + const workspace = await this.getOrCreateWorkspace(workspaceId) + return workspace.ask(req) + }) + + // Wait for all responses with timeout + const results = await Promise.allSettled(promises) + + // Aggregate results + const aggregatedResult = this.aggregateResults(results) + + // Store result for later retrieval + await this.storeResult(requestId, aggregatedResult) + }) + + return { + id: requestId, + acknowledged: true, + timestamp: Date.now() + } + } catch (error) { + throw new NetworkError('Ask operation failed', 'ASK_FAILED', { + requestId, + error: error.message + }) + } + } + + async modify(workspaceId: WorkspaceUuid, req: Request): Promise> { + try { + const workspace = await this.getOrCreateWorkspace(workspaceId) + return await workspace.modify(req) + } catch (error) { + throw new NetworkError('Modify operation failed', 'MODIFY_FAILED', { + workspaceId, + error: error.message + }) + } + } + + async ping(workspaces: WorkspaceUuid[], processChildren: boolean): Promise { + const promises = workspaces.map(async (workspaceId) => { + try { + const workspace = this.workspaces.get(workspaceId) + if (workspace) { + // Perform health check + await this.checkWorkspaceHealth(workspace) + + if (processChildren) { + const children = await this.getChildWorkspaces(workspaceId) + await this.ping(children, false) + } + } + } catch (error) { + console.warn(`Ping failed for workspace ${workspaceId}:`, error) + } + }) + + await Promise.allSettled(promises) + } + + async 
broadcast(responses: Array>): Promise { + // Implement broadcast logic based on your transport layer + // This could use WebSockets, message queues, etc. + for (const response of responses) { + await this.sendToClients(response) + } + } + + async close(): Promise { + // Close all workspaces + const closePromises = Array.from(this.workspaces.values()).map((ws) => ws.close()) + await Promise.allSettled(closePromises) + + this.workspaces.clear() + } + + private async getOrCreateWorkspace(workspaceId: WorkspaceUuid): Promise { + let workspace = this.workspaces.get(workspaceId) + + if (!workspace) { + workspace = await this.workspaceFactory(workspaceId) + this.workspaces.set(workspaceId, workspace) + } + + return workspace + } + + private aggregateResults(results: PromiseSettledResult[]): any { + const successful = results + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value) + + // Implement your aggregation logic here + return successful.reduce((acc, result) => { + // Merge results based on your data structure + return { ...acc, ...result } + }, {}) + } +} +``` + +### Custom Discovery Service + +```typescript +class DatabaseNodeDiscovery implements NodeDiscovery { + constructor(private database: Database) {} + + async getAccountNode(account: AccountUuid): Promise { + const result = await this.database.query('SELECT node_id FROM account_node_mapping WHERE account_id = ?', [account]) + + if (result.length === 0) { + // Assign node using consistent hashing + const availableNodes = await this.getNodes() + const nodeId = this.hashToNode(account, availableNodes) + + // Store mapping in database + await this.database.execute('INSERT INTO account_node_mapping (account_id, node_id) VALUES (?, ?)', [ + account, + nodeId + ]) + + return nodeId + } + + return result[0].node_id as NodeUuid + } + + async getNodes(): Promise { + const result = await this.database.query('SELECT node_id FROM nodes WHERE status = "active"') + + return result.map((row) => row.node_id as NodeUuid) + } + + async registerNode(node: NodeUuid, metadata: NodeMetadata): Promise { + await this.database.execute( + 'INSERT OR REPLACE INTO nodes (node_id, metadata, status, last_seen) VALUES (?, ?, "active", ?)', + [node, JSON.stringify(metadata), Date.now()] + ) + } + + async unregisterNode(node: NodeUuid): Promise { + await this.database.execute('UPDATE nodes SET status = "inactive" WHERE node_id = ?', [node]) + } + + private hashToNode(account: AccountUuid, nodes: NodeUuid[]): NodeUuid { + // Simple hash-based selection + const hash = this.simpleHash(account) + const index = hash % nodes.length + return nodes[index] + } + + private simpleHash(str: string): number { + let hash = 0 + for (let i = 0; i < str.length; i++) { + const char = str.charCodeAt(i) + hash = (hash << 5) - hash + char + hash = hash & hash // Convert to 32-bit integer + } + return Math.abs(hash) + } +} +``` + +--- + +For more detailed examples and advanced usage patterns, refer to the [Huly Examples Repository](https://github.com/hcengineering/huly-examples). diff --git a/network/docs/readme.md b/network/docs/readme.md new file mode 100644 index 00000000000..181b7d1a5b8 --- /dev/null +++ b/network/docs/readme.md @@ -0,0 +1,208 @@ +# Huly Virtual Network + +A distributed network architecture for the Huly platform that enables scalable, fault-tolerant communication between accounts, workspaces, and nodes. 
+ +![Schema](./Schema.png) + +## Overview + +The Huly virtual network implements a distributed system with the following key components: + +- **Nodes**: Computational units that handle requests and manage workspaces +- **Workspaces**: Isolated environments that contain application data and logic +- **Accounts**: User identities that can access multiple workspaces +- **Sessions**: Client connections to the network + +## Architecture Components + +### Account → Workspace Mapping + +The `AccountDB` is responsible for mapping `AccountUuid` to `WorkspaceUuid[]` representing all workspaces accessible by a given account. + +**Key Features:** + +- Multi-tenant workspace access +- Role-based permissions (Owner, Member, Guest) +- Workspace discovery and enumeration + +### Account → Node Mapping + +Each account is mapped to a specific node using a consistent hashing algorithm: + +```text +AccountUuid → hash → DHT → NodeId +``` + +**Implementation:** + +```typescript +hash(AccountUuid) % nodes.length +``` + +**Benefits:** + +- Load balancing across nodes +- Consistent routing for user operations +- Fault tolerance through re-hashing + +### Workspace → Workspace Mapping + +Workspaces can aggregate content from sub-workspaces, enabling hierarchical organization and unified access patterns. + +**Features:** + +- Parent-child workspace relationships +- Cascading operations across workspace hierarchies +- Unified query interface for related workspaces + +### Workspace Lifecycle Management + +Workspaces have complex startup/shutdown cycles managed by the network: + +- **Lazy Loading**: Workspaces are activated on-demand +- **Resource Management**: Automatic cleanup of unused workspaces +- **Health Monitoring**: Continuous workspace health checks + +## Core Operations + +### Query Operations (Map/Reduce) + +Distributed query processing across multiple workspaces: + +```text +1. Request with RequestId +2. AccountUuid → PersonalId → NodeId (routing) +3. Post request to Personal NodeId + 3.1 Personal Node: Resolve workspace → NodeIds mapping + 3.2 Personal Node: Distribute query to required nodes + 4.1 Target Nodes: Check workspace status, activate if needed + 4.2 Target Nodes: Execute query on workspace + 4.3 Target Nodes: Process child workspaces if applicable + 4.4 Target Nodes: Subscribe to workspace changes + 4.5 Target Nodes: Perform map/reduce on results + 4.6 Target Node: Pass result to personal Node. + 3.3 Personal Node: Pass result to client. + 3.4. Collect and aggregate responses + 3.5. Handle retries for failed workspaces + 3.6. Cancel requests when needed + 3.7. Return final response to client +``` + +### Modify Operations + +Transactional modifications across the distributed system: + +```text +1. Request with RequestId +2. AccountUuid + PersonalId → NodeId (routing) +3. Post to Personal NodeId + 3.1 Personal Node: WorkspaceId → NodeId resolution + 4.1 Target Node: Execute operation on workspace + 4.2 Target Node: Return response to personal node + 3.2 Personal Node: Forward response to client +``` + +### Broadcast Operations + +Efficient message distribution to multiple clients: + +**Account Broadcast:** + +```text +1. AccountUuid → PersonalId → NodeId (targeting) +2. Post message to client's personal node +3. Node broadcasts to all connected clients +``` + +**Workspace Broadcast:** + +```text +1. WorkspaceId → AccountUuid[] → NodeId[] (fan-out) +2. Broadcast to all relevant nodes +3. 
Each node broadcasts to its connected clients
+```
+
+## Implementation Details
+
+### Core Interfaces
+
+**Node Interface:**
+
+```typescript
+interface Node {
+  _id: NodeUuid
+  ask: (req: Request, options?: AskOptions) => Promise
+  modify: (workspaceId: WorkspaceUuid, req: Request) => Promise>
+  ping: (accounts: AccountUuid[]) => Promise
+  broadcast: (req: Array>) => Promise
+  close: () => Promise
+}
+```
+
+**Workspace Interface:**
+
+```typescript
+interface Workspace {
+  _id: WorkspaceUuid
+  lastUse: number
+  ask: (req: Request) => Promise>
+  modify: (req: Request) => Promise>
+  ping: () => void
+  close: () => Promise
+}
+```
+
+### Discovery Services
+
+**Node Discovery:**
+
+- Hash-based node selection
+- Health monitoring and failover
+- Dynamic node registration/deregistration
+
+**Workspace Discovery:**
+
+- Account-to-workspace mapping
+- Workspace hierarchy resolution
+- Real-time workspace availability
+
+### Session Management
+
+**Client Session:**
+
+```typescript
+interface Client {
+  account: AccountUuid
+  sessionId: string
+  ask: (req: T, options?: AskOptions) => Promise>
+  modify: (workspaceId: WorkspaceUuid, req: T) => Promise>
+  onBroadcast?: (response: Response) => void
+  onClose?: () => void
+}
+```
+
+## Usage Examples
+
+```typescript
+// Initialize node discovery
+const nodeDiscovery = new StaticNodeDiscovery([
+  ['node1', { region: 'us-east', capacity: 100 }],
+  ['node2', { region: 'us-west', capacity: 150 }]
+]);
+
+// Create workspace discovery
+const workspaceDiscovery = new StaticWorkspaceDiscovery({
+  'user1': ['workspace1', 'workspace2'],
+  'user2': ['workspace3']
+});
+
+// Initialize session manager
+const sessionManager = new SessionManagerImpl(nodeFactory, operationHandler, workspaceDiscovery, nodeDiscovery);
+
+// Register client
+const client = await sessionManager.register('user1', 'session1');
+
+// Perform operations
+const result = await client.ask('query-data', { workspace: ['workspace1'] });
+await client.modify('workspace1', { action: 'update', data: {...} });
+```
diff --git a/network/todo.md b/network/todo.md
new file mode 100644
index 00000000000..4e6a84f3f2d
--- /dev/null
+++ b/network/todo.md
@@ -0,0 +1,22 @@
+# Problems not solved
+
+- [x] Basic functional implementation and a ZeroMQ transport-based implementation.
+
+  - [x] Basic tests
+
+- Add OpenTelemetry for monitoring/logging
+
+- Add Docker + a real network benchmark test
+
+- Retry logic: request/response retries need to be tracked more carefully when a node or workspace is missed.
+
+- Rate-limit logic is not implemented; it is unclear how to manage it for now.
+
+- Memory pressure: an ask request can return too much data per node, or on the final reduce node.
+
+  1. Possible solution: implement streaming of responses (see the sketch after this list).
+  2. Add limits per workspace request?
+
+- Work on more real-life examples. Integrate into the platform.
+
+- Not sure if warmup/ping is really needed.
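+
+A possible shape for streamed responses, as referenced in the list above. This is only a sketch; the `ResponseStream` name, its members, and an `askStream`-style client call are hypothetical and not part of the current implementation.
+
+```typescript
+// Hypothetical sketch (not part of the implementation): yield partial result
+// batches as individual workspaces answer, instead of buffering the whole
+// aggregated ResponseValue in memory on the reduce node.
+interface ResponseStream<T> {
+  // Async iteration over partial batches, one per responding workspace.
+  [Symbol.asyncIterator]: () => AsyncIterator<{ workspaceId: string, value: T[] }>
+  // Cancel workspace requests that are still outstanding.
+  cancel: () => Promise<void>
+}
+
+// Example consumer (hypothetical askStream on the client):
+// for await (const batch of await client.askStream(req)) { /* handle batch.value */ }
+```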
diff --git a/network/zeromq/.eslintrc.js b/network/zeromq/.eslintrc.js new file mode 100644 index 00000000000..ce90fb9646f --- /dev/null +++ b/network/zeromq/.eslintrc.js @@ -0,0 +1,7 @@ +module.exports = { + extends: ['./node_modules/@hcengineering/platform-rig/profiles/node/eslint.config.json'], + parserOptions: { + tsconfigRootDir: __dirname, + project: './tsconfig.json' + } +} diff --git a/network/zeromq/.npmignore b/network/zeromq/.npmignore new file mode 100644 index 00000000000..e3ec093c383 --- /dev/null +++ b/network/zeromq/.npmignore @@ -0,0 +1,4 @@ +* +!/lib/** +!CHANGELOG.md +/lib/**/__tests__/ diff --git a/network/zeromq/config/rig.json b/network/zeromq/config/rig.json new file mode 100644 index 00000000000..78cc5a17334 --- /dev/null +++ b/network/zeromq/config/rig.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/rig-package/rig.schema.json", + "rigPackageName": "@hcengineering/platform-rig", + "rigProfile": "node" +} diff --git a/network/zeromq/jest.config.js b/network/zeromq/jest.config.js new file mode 100644 index 00000000000..2cfd408b679 --- /dev/null +++ b/network/zeromq/jest.config.js @@ -0,0 +1,7 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + testMatch: ['**/?(*.)+(spec|test).[jt]s?(x)'], + roots: ["./src"], + coverageReporters: ["text-summary", "html"] +} diff --git a/network/zeromq/package.json b/network/zeromq/package.json new file mode 100644 index 00000000000..fb8cc290012 --- /dev/null +++ b/network/zeromq/package.json @@ -0,0 +1,42 @@ +{ + "name": "@hcengineering/network-zeromq", + "version": "0.6.0", + "main": "lib/index.js", + "svelte": "src/index.ts", + "types": "types/index.d.ts", + "author": "Anticrm Platform Contributors", + "template": "@hcengineering/node-package", + "license": "EPL-2.0", + "scripts": { + "build": "compile", + "build:watch": "compile", + "test": "jest --passWithNoTests --silent --forceExit", + "format": "format src", + "_phase:build": "compile transpile src", + "_phase:test": "jest --passWithNoTests --silent --forceExit", + "_phase:format": "format src", + "_phase:validate": "compile validate" + }, + "devDependencies": { + "@hcengineering/platform-rig": "^0.6.0", + "@typescript-eslint/eslint-plugin": "^6.11.0", + "eslint-plugin-import": "^2.26.0", + "eslint-plugin-promise": "^6.1.1", + "eslint-plugin-n": "^15.4.0", + "eslint": "^8.54.0", + "@typescript-eslint/parser": "^6.11.0", + "eslint-config-standard-with-typescript": "^40.0.0", + "prettier": "^3.1.0", + "typescript": "^5.8.3", + "@types/node": "^22.15.29", + "jest": "^29.7.0", + "ts-jest": "^29.1.1", + "@types/jest": "^29.5.5", + "@types/uuid": "^8.3.1" + }, + "dependencies": { + "@hcengineering/network": "^0.6.0", + "zeromq": "^6.5.0", + "uuid": "^8.3.2" + } +} diff --git a/network/zeromq/src/__test__/backrpc.spec.ts b/network/zeromq/src/__test__/backrpc.spec.ts new file mode 100644 index 00000000000..f7ae104d98c --- /dev/null +++ b/network/zeromq/src/__test__/backrpc.spec.ts @@ -0,0 +1,104 @@ +import { TickManagerImpl } from '@hcengineering/network' +import { BackRPCClient, BackRPCServer, type ClientId } from '../backrpc' + +describe('backrpc', () => { + it('test request/response', async () => { + const tickMgr = new TickManagerImpl(10) + const server = new BackRPCServer({ + requestHandler: async (client, method, params, send) => { + switch (method) { + case 'hello': + await send('World') + break + case 'do': + await send(await server.request(client, 'callback', '')) + break + default: + await send('unknown') + } + }, + 
helloHandler: async (clientId) => { + console.log(`Client ${clientId} connected`) + } + }, tickMgr) + + const client = new BackRPCClient( + 'client1' as ClientId, + { + requestHandler: async (method, param, send) => { + if (method === 'callback') { + await send('callback-value') + } + await send('') + }, + onRegister: async () => { + console.log('Client registered') + } + }, 'localhost', await server.getPort(), tickMgr) + + const response = await client.request('hello', 'world') + expect(response).toBe('World') + + const response2 = await client.request('do', '') + expect(response2).toBe('callback-value') + + client.close() + server.close() + }) + + it('test check-register', async () => { + const tickMgr = new TickManagerImpl(10) + let server = new BackRPCServer({ + requestHandler: async (client, method, params, send) => { + }, + helloHandler: async (clientId) => { + console.log(`Client ${clientId} connected`) + } + }, tickMgr, '*', 8701) + + let registered = 0 + + let doResolve: () => void + const p: Promise = new Promise((resolve) => { + doResolve = () => { resolve() } + }) + + let doResolve2: () => void + const p2: Promise = new Promise((resolve) => { + doResolve2 = () => { resolve() } + }) + const client = new BackRPCClient( + 'client1' as ClientId, + { + requestHandler: async (method, param, send) => { + }, + onRegister: async () => { + registered++ + doResolve() + if (registered === 2) { + doResolve2() + } + } + }, 'localhost', 8701, tickMgr) + + await p + + expect(registered).toBe(1) + + server.close() + + server = new BackRPCServer({ + requestHandler: async (client, method, params, send) => { + }, + helloHandler: async (clientId) => { + console.log(`Client ${clientId} connected`) + } + }, tickMgr, '*', 8701) + + await p2 + expect(registered).toBe(2) + + client.close() + server.close() + }) +}) diff --git a/network/zeromq/src/__test__/network.spec.ts b/network/zeromq/src/__test__/network.spec.ts new file mode 100644 index 00000000000..28c36013a3c --- /dev/null +++ b/network/zeromq/src/__test__/network.spec.ts @@ -0,0 +1,189 @@ +import { + AgentImpl, + composeCID, + NetworkImpl, + TickManagerImpl, + type AgentEndpointRef, + type AgentUuid, + type ClientUuid, + type Container, + type ContainerConnection, + type ContainerEndpointRef, + type ContainerKind, + type ContainerUuid, + type NetworkClient, + type TickManager +} from '@hcengineering/network' +import { NetworkAgentServer } from '../agent' +import { BackRPCServer } from '../backrpc' +import { NetworkClientImpl } from '../client' +import { containerDirectRef, containerOnAgentEndpointRef, EndpointKind, parseEndpointRef } from '../endpoints' +import { NetworkServer } from '../server' + +const agents = { + agent1: 'agent1' as AgentUuid, + agent2: 'agent2' as AgentUuid +} + +const kinds = { + session: 'session' as ContainerKind, + workspace: 'workspace' as ContainerKind +} + +class DummySessionContainer implements Container { + async request (operation: string, data?: any, clientId?: ClientUuid): Promise { + if (operation === 'test') { + for (const [k, bk] of this.eventHandlers.entries()) { + if (k === clientId) { + await bk('event') + } + } + return 'test-ok' + } + throw new Error('Unknown operation') + } + + // Called when the container is terminated + onTerminated?: () => void + + async terminate (): Promise {} + + async ping (): Promise {} + + connect (clientId: ClientUuid, handler: (data: any) => Promise): void { + this.eventHandlers.set(clientId, handler) + } + + disconnect (clientId: ClientUuid): void { + 
this.eventHandlers.delete(clientId) + } + + private readonly eventHandlers = new Map Promise>() +} + +class DummyWorkspaceContainer implements Container { + server!: BackRPCServer + + constructor ( + readonly uuid: ContainerUuid, + readonly agentId: AgentUuid, + readonly networkClient: NetworkClient + ) {} + + async start (tickMgr: TickManager): Promise { + this.server = new BackRPCServer( + { + requestHandler: async (client, method, params, send) => { + // Handle incoming requests + if (method === 'test') { + await send('test-ok') + } + throw new Error('Unknown method') + } + }, + tickMgr, + 'localhost', + 0 + ) + const port = await this.server.getPort() + return containerDirectRef('localhost', port, this.uuid, this.agentId) + } + + async request (operation: string, data?: any): Promise { + return '' + } + + // Called when the container is terminated + onTerminated?: () => void + + async terminate (): Promise { + this.server.close() + } + + async ping (): Promise {} + + connect (clientId: ClientUuid, handler: (data: any) => Promise): void { + this.eventHandlers.set(clientId, handler) + } + + disconnect (clientId: ClientUuid): void { + this.eventHandlers.delete(clientId) + } + + private readonly eventHandlers = new Map Promise>() +} + +function createAgent1 (tickMgr: TickManagerImpl, networkClient: NetworkClient): AgentImpl { + const agent: AgentImpl = new AgentImpl(agents.agent1, { + [kinds.session]: async (uuid) => { + return [new DummySessionContainer(), containerOnAgentEndpointRef(agent.endpoint as AgentEndpointRef, uuid)] + }, + [kinds.workspace]: async (uuid) => { + const container = new DummyWorkspaceContainer(uuid, agent.uuid, networkClient) + const endpoint = await container.start(tickMgr) + return [container, endpoint] + } + }) + return agent +} + +jest.setTimeout(500000) + +describe('check network server is working fine', () => { + it('check client connect to network', async () => { + const tickMgr = new TickManagerImpl(10) // 10 ticks per second + const net = new NetworkImpl(tickMgr) + + // we need some values to be available + const network = new NetworkServer(net, tickMgr, '*', 0) + + const networkClient: NetworkClient = new NetworkClientImpl('localhost', await network.rpcServer.getPort(), tickMgr) + + const agent = createAgent1(tickMgr, networkClient) + // Random port on * + const agentServer = new NetworkAgentServer(tickMgr, 'localhost', '*', 0) + await agentServer.start(agent) + + await networkClient.register(agent) + + const _kinds = await networkClient.kinds() + expect(_kinds).toEqual(['session', 'workspace']) + + const _agents = await networkClient.agents() + expect(_agents.length).toEqual(1) + expect(_agents[0].agentId).toEqual(agents.agent1) + expect(_agents[0].containers.length).toEqual(0) + + // Start a new container and check if messaging works + const containerRef = await networkClient.get(composeCID('session', 'user1'), { kind: kinds.session }) + + const data = parseEndpointRef(containerRef.endpoint) + expect(data.kind).toEqual(EndpointKind.routed) + + const containers = await networkClient.list(kinds.session) + expect(containers.length).toEqual(1) + + const containerConnection: ContainerConnection = await containerRef.connect() + expect(containerConnection).toBeDefined() + + // Verify requests and events are working fine. 
+ const events: any[] = [] + const p = new Promise(resolve => { + containerConnection.on = async (data) => { + events.push(data) + resolve() + } + }) + + const resp1 = await containerConnection.request('test') + expect(resp1).toEqual('test-ok') + + await p + expect(events.length).toEqual(1) + expect(events[0]).toEqual('event') + + await agentServer.close() + await networkClient.close() + await network.close() + }) +}) diff --git a/network/zeromq/src/__test__/samples.ts b/network/zeromq/src/__test__/samples.ts new file mode 100644 index 00000000000..dacd9362684 --- /dev/null +++ b/network/zeromq/src/__test__/samples.ts @@ -0,0 +1,27 @@ +import type { AccountUuid, WorkspaceUuid } from '@hcengineering/network' +import { StaticWorkspaceDiscovery } from '@hcengineering/network' + +export const workspaces = { + ws1: 'ws1' as WorkspaceUuid, + ws2: 'ws2' as WorkspaceUuid, + ws3: 'ws3' as WorkspaceUuid, + ws4: 'ws4' as WorkspaceUuid, + ws5: 'ws5' as WorkspaceUuid, + ws6: 'ws6' as WorkspaceUuid, + ws7: 'ws7' as WorkspaceUuid, + ws8: 'ws8' as WorkspaceUuid, + ws9: 'ws9' as WorkspaceUuid, + ws10: 'ws10' as WorkspaceUuid +} + +export const users = { + user1: 'user1' as AccountUuid, + user2: 'user2' as AccountUuid +} + +export const wsDiscovery = new StaticWorkspaceDiscovery({ + [users.user1]: [workspaces.ws1, workspaces.ws2, workspaces.ws3], + [users.user2]: [workspaces.ws4, workspaces.ws5, workspaces.ws6], + [workspaces.ws1]: [workspaces.ws7, workspaces.ws8], + [workspaces.ws8]: [workspaces.ws9, workspaces.ws10] +}) diff --git a/network/zeromq/src/__test__/zmq.spec.ts b/network/zeromq/src/__test__/zmq.spec.ts new file mode 100644 index 00000000000..5357cb9e8bf --- /dev/null +++ b/network/zeromq/src/__test__/zmq.spec.ts @@ -0,0 +1,237 @@ +import * as zmq from 'zeromq' + +describe('zmq-tests', () => { + it('check reconnect', async () => { + // Simulate a reconnect event + + const router = new zmq.Router() + + await router.bind('tcp://0.0.0.0:7654') + + const request = new zmq.Request() + request.connect('tcp://localhost:7654') + + await request.send('Hello') + + const data = await router.receive() + expect(data[2].toString()).toBe('Hello') + + await router.send([data[0], data[1], 'World']) + + const result2 = await request.receive() + expect(result2.toString()).toBe('World') + + const obs1 = new zmq.Observer(request) + + let closed = false + obs1.on('close', (dta) => { + closed = true + console.log('closed', dta.address) + }) + + router.close() + + // eslint-disable-next-line no-unmodified-loop-condition + while (!closed) { + await new Promise((resolve) => { + setTimeout(() => { + resolve() + }, 10) + }) + } + + request.close() + }) + it('check diff order', async () => { + // Simulate a reconnect event + + const router = new zmq.Router() + + await router.bind('tcp://0.0.0.0:7654') + + const request = new zmq.Request() + request.connect('tcp://localhost:7654') + + const request2 = new zmq.Request() + request2.connect('tcp://localhost:7654') + + await request.send('Hello1') + await request2.send('Hello2') + const data = await router.receive() + const data2 = await router.receive() + + expect(data[2].toString()).toBe('Hello1') + + expect(data2[2].toString()).toBe('Hello2') + + await router.send([data2[0], data2[1], 'World2']) + await router.send([data[0], data[1], 'World1']) + + const result2 = await request.receive() + expect(result2.toString()).toBe('World1') + + const result3 = await request2.receive() + expect(result3.toString()).toBe('World2') + request.close() + request2.close() + router.close() + }) 
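+
+  // Illustrative sketch: the BackRPC layer in src/backrpc.ts relies on the Router
+  // socket prepending the sender identity frame to every message, and on replies
+  // echoing that identity back as the first frame. The frame layout assumed here
+  // ([identity, operation, reqId, payload]) mirrors backrpc.ts; this case only
+  // illustrates that envelope.
+  it('router echoes the dealer identity frame', async () => {
+    const router = new zmq.Router()
+    await router.bind('tcp://*:0')
+
+    const endpoint: string = router.lastEndpoint as string
+    const portMatch = endpoint.match(/:(\d+)$/)
+    const port = portMatch != null ? parseInt(portMatch[1]) : 0
+
+    const dealer = new zmq.Dealer()
+    dealer.connect(`tcp://localhost:${port}`)
+
+    // operation, request id, payload - the identity frame is added by the Router
+    await dealer.send(['1', 'req-1', JSON.stringify(['hello', {}])])
+
+    const msg = await router.receive()
+    expect(msg.length).toBe(4)
+
+    // The reply must start with the identity frame received in msg[0]
+    await router.send([msg[0], '2', 'req-1', JSON.stringify('world')])
+
+    const reply = await dealer.receive()
+    expect(reply[2].toString()).toBe('"world"')
+
+    router.close()
+    dealer.close()
+  })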
+ + it('check multiple requests from same client', async () => { + // Create router socket (server) + const router = new zmq.Pull() + await router.bind('tcp://0.0.0.0:7654') + + const routerPub = new zmq.Publisher() + await routerPub.bind('tcp://0.0.0.0:7655') + + // Create request socket (client) + const client = new zmq.Push() + client.connect('tcp://localhost:7654') + + const clientSub = new zmq.Subscriber() + clientSub.connect('tcp://localhost:7655') + clientSub.subscribe('client1') + + await client.send('Hello1') + await client.send('Hello2') + await client.send('Hello3') + + const d1 = await router.receive() + const d2 = await router.receive() + const d3 = await router.receive() + + expect(d1[0].toString()).toBe('Hello1') + expect(d2[0].toString()).toBe('Hello2') + expect(d3[0].toString()).toBe('Hello3') + + await routerPub.send(['client1', '', 'World1']) + await routerPub.send(['client1', '', 'World2']) + await routerPub.send(['client1', '', 'World3']) + + const result1 = await clientSub.receive() + const result2 = await clientSub.receive() + const result3 = await clientSub.receive() + + expect(result1[2].toString()).toBe('World1') + expect(result2[2].toString()).toBe('World2') + expect(result3[2].toString()).toBe('World3') + + // Cleanup + client.close() + clientSub.close() + router.close() + routerPub.close() + }) + + it('random port text', async () => { + // Simulate a reconnect event + + const router = new zmq.Router() + + await router.bind('tcp://*:0') + + const reqEndpoint: string = router.lastEndpoint as string + expect(reqEndpoint).toBeDefined() + + const portMatch = reqEndpoint.match(/:(\d+)$/) + const port = portMatch != null ? parseInt(portMatch[1]) : 0 + + expect(port).toBeGreaterThan(0) + + const request = new zmq.Request() + request.connect(`tcp://localhost:${port}`) + + await request.send('Hello') + + const data = await router.receive() + expect(data[2].toString()).toBe('Hello') + + await router.send([data[0], data[1], 'World']) + + const result2 = await request.receive() + expect(result2.toString()).toBe('World') + + router.close() + request.close() + }) + + it('dealer check', async () => { + // Simulate a reconnect event + + const router = new zmq.Router() + + await router.bind('tcp://*:0') + + const reqEndpoint: string = router.lastEndpoint as string + expect(reqEndpoint).toBeDefined() + + const portMatch = reqEndpoint.match(/:(\d+)$/) + const port = portMatch != null ? parseInt(portMatch[1]) : 0 + + expect(port).toBeGreaterThan(0) + + const request = new zmq.Dealer() + request.connect(`tcp://localhost:${port}`) + + await request.send('Hello') + await request.send('Hello2') + + const data = await router.receive() + const data2 = await router.receive() + expect(data[1].toString()).toBe('Hello') + expect(data2[1].toString()).toBe('Hello2') + + await router.send([data2[0], 'World']) + await router.send([data[0], 'World']) + + let result2 = await request.receive() + expect(result2.toString()).toBe('World') + + result2 = await request.receive() + expect(result2.toString()).toBe('World') + + router.close() + request.close() + }) + + it('client broadcast check', async () => { + // Simulate a reconnect event + + const router = new zmq.Router() + + await router.bind('tcp://*:0') + + const reqEndpoint: string = router.lastEndpoint as string + expect(reqEndpoint).toBeDefined() + + const portMatch = reqEndpoint.match(/:(\d+)$/) + const port = portMatch != null ? 
parseInt(portMatch[1]) : 0 + + expect(port).toBeGreaterThan(0) + + const request = new zmq.Dealer() + request.connect(`tcp://localhost:${port}`) + + await request.send('Hello') + + const data = await router.receive() + expect(data[1].toString()).toBe('Hello') + + await router.send([data[0], 'World']) + await router.send([data[0], 'World2']) + await router.send([data[0], 'World3']) + + let result2 = await request.receive() + expect(result2.toString()).toBe('World') + + result2 = await request.receive() + expect(result2.toString()).toBe('World2') + + result2 = await request.receive() + expect(result2.toString()).toBe('World3') + + router.close() + request.close() + }) +}) diff --git a/network/zeromq/src/agent.ts b/network/zeromq/src/agent.ts new file mode 100644 index 00000000000..68a15a3b7ec --- /dev/null +++ b/network/zeromq/src/agent.ts @@ -0,0 +1,209 @@ +import { + type ClientUuid, + type ContainerConnection, + type ContainerEvent, + type ContainerUuid, + type NetworkAgent, + type TickManager +} from '@hcengineering/network' +import { + BackRPCClient, + BackRPCServer, + type BackRPCResponseSend, + type BackRPCServerHandler, + type ClientId +} from './backrpc' +import { agentDirectRef } from './endpoints' +import { opNames } from './types' + +/** + * A server for an agent with connection abilities. + * + * start method should be called before agent will be registered on network. + */ +export class NetworkAgentServer implements BackRPCServerHandler { + readonly rpcServer: BackRPCServer + agent: NetworkAgent | undefined + + constructor ( + tickMgr: TickManager, + readonly endpointHost: string, // An endpoint construction host, will be used to register + host: string = '*', // A socket visibility + port: number = 3738 // If 0, port will be free random one. + ) { + this.rpcServer = new BackRPCServer(this, tickMgr, host, port) + } + + async start (agent: NetworkAgent): Promise { + this.agent = agent + this.agent.endpoint = agentDirectRef(this.endpointHost, await this.rpcServer.getPort(), agent.uuid) + + // Now registration is possible, or update will be sent + await this.agent.onAgentUpdate?.() + } + + async onContainerUpdate (event: ContainerEvent): Promise { + // Handle container update + } + + async getPort (): Promise { + return await this.rpcServer.getPort() + } + + async close (): Promise { + this.rpcServer.close() + } + + async requestHandler (client: ClientUuid, method: string, params: any, send: BackRPCResponseSend): Promise { + if (this.agent === undefined) { + return + } + switch (method) { + case opNames.connect: { + const uuids: ContainerUuid[] = Array.isArray(params.uuid) ? 
params.uuid : [params.uuid]
+        let connected: number = 0
+        for (const uuid of uuids) {
+          const container = await this.agent.getContainer(uuid)
+          if (container === undefined) {
+            console.error(`Container ${uuid} not found`)
+            continue
+          }
+          // Events will be routed via connectionId
+          container.connect(client, async (data) => {
+            await this.rpcServer.send(client, [uuid, data])
+          })
+          connected++
+        }
+        await send(connected)
+        break
+      }
+      case opNames.disconnect: {
+        const uuid = params.uuid as ContainerUuid
+        const container = await this.agent.getContainer(uuid)
+        if (container === undefined) {
+          throw new Error('Container not found')
+        }
+        container.disconnect(client)
+        await send('ok')
+        break
+      }
+      case opNames.sendContainer: {
+        const target: ContainerUuid = params[0]
+        const operation: string = params[1]
+        const data: any = params[2]
+
+        const container = await this.agent.getContainer(target)
+        if (container === undefined) {
+          throw new Error('Container not found')
+        }
+        await send(await container.request(operation, data, client))
+        break
+      }
+
+      default:
+        throw new Error('Unknown method: ' + method)
+    }
+  }
+
+  async helloHandler (clientId: ClientUuid): Promise<void> {
+    console.log(`Client ${clientId} connected`)
+  }
+
+  async handleTimeout (client: ClientUuid): Promise<void> {
+    console.log(`Client ${client} timed out`)
+  }
+}
+
+/**
+ * A routed connection to a container via an agent.
+ */
+export class RoutedNetworkAgentConnectionImpl<ClientT extends string> {
+  private readonly client: BackRPCClient
+
+  containers = new Map()
+
+  constructor (
+    tickMgr: TickManager,
+    readonly clientId: ClientT,
+    readonly host: string,
+    readonly port: number
+  ) {
+    this.client = new BackRPCClient(this.clientId, this, host, port, tickMgr)
+  }
+
+  async connect (containerUuid: ContainerUuid): Promise<ContainerConnection> {
+    // Establish a connection to the specified container
+    await this.client.request(opNames.connect, { uuid: containerUuid })
+
+    const connection: ContainerConnection = {
+      containerId: containerUuid,
+      close: async () => {
+        await this.client.request(opNames.disconnect, { uuid: containerUuid })
+      },
+      request: async (operation, data) =>
+        await this.client.request(opNames.sendContainer, [containerUuid, operation, data])
+    }
+    this.containers.set(containerUuid, connection)
+    return connection
+  }
+
+  async requestHandler (method: string, params: any, send: BackRPCResponseSend): Promise<void> {
+    // No callback is required
+  }
+
+  async onEvent (event: any): Promise<void> {
+    const [container, data] = event
+    const connection = this.containers.get(container)
+    if (connection !== undefined) {
+      await connection.on?.(data)
+    }
+  }
+
+  async close (): Promise<void> {
+    this.client.close()
+  }
+
+  async onRegister (): Promise<void> {
+    // Handle registration logic here
+  }
+}
+
+/**
+ * A direct connection to a container.
+ */
+export class NetworkDirectConnectionImpl implements ContainerConnection {
+  private readonly client: BackRPCClient
+
+  on?: ((data: any) => Promise<void>) | undefined
+
+  containers: ContainerUuid[] = []
+
+  constructor (
+    tickMgr: TickManager,
+    readonly clientId: ClientUuid,
+    readonly containerId: ContainerUuid,
+    readonly host: string,
+    readonly port: number
+  ) {
+    this.client = new BackRPCClient(this.clientId, this, host, port, tickMgr)
+  }
+
+  async request (operation: string, data?: any): Promise<any> {
+    return await this.client.request(operation, data)
+  }
+
+  async requestHandler (method: string, params: any, send: BackRPCResponseSend): Promise<void> {
+    // No callback is required
+  }
+
+  async onEvent (event: any): Promise<void> {
+    await this.on?.(event)
+  }
+
+  async close (): Promise<void> {
+    this.client.close()
+  }
+
+  async onRegister (): Promise<void> {
+    // No registration is required
+  }
+}
diff --git a/network/zeromq/src/backrpc.ts b/network/zeromq/src/backrpc.ts
new file mode 100644
index 00000000000..c7b118cb960
--- /dev/null
+++ b/network/zeromq/src/backrpc.ts
@@ -0,0 +1,389 @@
+//
+// Copyright © 2025 Hardcore Engineering Inc.
+//
+// Licensed under the Eclipse Public License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. You may
+// obtain a copy of the License at https://www.eclipse.org/legal/epl-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { timeouts, type TickManager } from '@hcengineering/network'
+import { v4 as uuidv4 } from 'uuid'
+import * as zmq from 'zeromq'
+
+const backrpcOperations = {
+  hello: 0,
+  request: 1,
+  response: 2,
+  responseError: 3,
+  event: 4,
+  ping: 5,
+  pong: 6
+}
+
+export type ClientId = string & { __clientId: string }
+
+export interface BackRPCServerHandler<ClientT extends string = ClientId> {
+  requestHandler: (
+    client: ClientT,
+    method: string,
+    params: any,
+    send: (response: any) => Promise<void>
+  ) => Promise<void>
+  helloHandler?: (client: ClientT) => Promise<void>
+  handleTimeout?: (client: ClientT) => Promise<void>
+}
+
+export class BackRPCServer<ClientT extends string = ClientId> {
+  uuid = uuidv4()
+
+  private readonly router: zmq.Router
+
+  private requestCounter: number = 0
+
+  private readonly requests = new Map<string, { resolve: (value: any) => void, reject: (err: any) => void }>()
+
+  private readonly clientMapping = new Map()
+  private readonly revClientMapping = new Map()
+
+  private closed: boolean = false
+
+  private bound: Promise<void> | undefined
+
+  constructor (
+    private readonly handlers: BackRPCServerHandler<ClientT>,
+    private readonly tickMgr: TickManager,
+    readonly host: string = '*',
+    private readonly port: number = 0
+  ) {
+    this.router = new zmq.Router()
+
+    this.tickMgr.register(() => {
+      void this.checkAlive()
+    }, timeouts.pingInterval)
+
+    void this.start()
+  }
+
+  async checkAlive (): Promise<void> {
+    const now = this.tickMgr.now()
+    // Handle outdated clients
+    for (const [clientId, clientRecord] of this.revClientMapping.entries()) {
+      const timeSinceLastSeen = now - clientRecord.lastSeen
+
+      if (timeSinceLastSeen > timeouts.aliveTimeout * 1000) {
+        console.warn(`Client ${clientId} has been inactive for ${Math.round(timeSinceLastSeen / 1000)}s, marking as dead`)
+        await this.handlers.handleTimeout?.(clientRecord.id)
+        this.revClientMapping.delete(clientId)
+        this.clientMapping.delete(clientRecord.id)
+      }
+    }
+  }
+
+  async getPort (): Promise<number> {
+    await this.bound
+    const reqEndpoint = this.router.lastEndpoint
+    if (reqEndpoint === null) {
+      throw new Error('Router is not bound to an endpoint')
+    }
+
+    const portMatch = reqEndpoint.match(/:(\d+)$/)
+    const port = portMatch != null ? parseInt(portMatch[1]) : undefined
+    if (port === undefined) {
+      throw new Error('Router is not bound to a port')
+    }
+    return port
+  }
+
+  private async start (): Promise<void> {
+    this.bound = this.router.bind(`tcp://${this.host}:${this.port}`)
+    await this.bound
+
+    // Read messages from clients.
+ for await (const msg of this.router) { + if (this.closed) { + return + } + try { + const clientId = msg[0] + const clientIdText = clientId.toString('base64') + const [operation, reqId, payload] = [parseInt(msg[1].toString()), msg[2].toString(), msg[3]] + + const client = this.revClientMapping.get(clientIdText) + if (client !== undefined) { + client.lastSeen = this.tickMgr.now() + } + switch (operation) { + case backrpcOperations.hello: + // Remember clientId to be able to do back requests. + if (!this.clientMapping.has(reqId.toString() as ClientT)) { + await this.handlers.helloHandler?.(reqId.toString() as ClientT) + } + this.clientMapping.set(reqId.toString() as ClientT, clientId) + this.revClientMapping.set(clientIdText, { + id: reqId.toString() as ClientT, + lastSeen: this.tickMgr.now() + }) + await this.router.send([clientId, backrpcOperations.hello, this.uuid, '']) + break + case backrpcOperations.ping: { + await this.router.send([clientId, backrpcOperations.pong, this.uuid, '']) + break + } + case backrpcOperations.request: + { + const [method, params] = JSON.parse(payload.toString()) + if (client === undefined) { + console.error(`Client ${clientId.toString()} not found`) + } else { + const sendError = async (err: Error): Promise => { + await this.router.send([ + clientId, + backrpcOperations.responseError, + reqId, + JSON.stringify({ + message: err.message ?? '', + stack: err.stack + }) + ]) + } + void this.handlers.requestHandler( + client.id, + method, + params, + async (response: any) => { + await this.router.send([clientId, backrpcOperations.response, reqId, JSON.stringify(response)]) + } + ).catch((err) => { + void sendError(err) + }) + } + } + break + case backrpcOperations.response: { + const reqID = reqId.toString() + const req = this.requests.get(reqID) + try { + req?.resolve(JSON.parse(payload.toString())) + } catch (err: any) { + console.error(err) + } + this.requests.delete(reqID) + break + } + case backrpcOperations.responseError: { + const reqID = reqId.toString() + const req = this.requests.get(reqID) + try { + req?.reject(JSON.parse(payload.toString())) + } catch (err: any) { + console.error(err) + } + this.requests.delete(reqID) + break + } + } + } catch (err: any) { + console.error(err) + } + } + } + + async request (clientId: ClientT, method: string, params: any): Promise { + const clientIdentity = this.clientMapping.get(clientId) + if (clientIdentity === undefined) { + throw new Error(`Client ${clientId} not found`) + } + return await new Promise((resolve, reject) => { + const reqId = clientId + '-' + this.requestCounter++ + this.requests.set(reqId, { resolve, reject }) + + void this.router + .send([clientIdentity, backrpcOperations.request, reqId, JSON.stringify([method, params])]) + .catch((err) => { + reject(err) + }) + }) + } + + async send (clientId: ClientT, body: any): Promise { + const clientIdentity = this.clientMapping.get(clientId) + if (clientIdentity === undefined) { + throw new Error(`Client ${clientId as string} not found`) + } + await this.router.send([clientIdentity, backrpcOperations.event, '', JSON.stringify(body)]) + } + + async close (): Promise { + this.closed = true + this.router.close() + } +} + +export type BackRPCResponseSend = (response: any) => Promise +export interface BackRPCClientHandler { + requestHandler: ( + method: string, + params: any, + send: BackRPCResponseSend + ) => Promise + onRegister?: () => Promise + onEvent?: (event: any) => Promise +} + +export class BackRPCClient { + serverId: string | Promise + dealer: zmq.Dealer 
+ + requestCounter: number = 0 + + requests = new Map void, reject: (err: any) => void }>() + closed: boolean = false + + observer: zmq.Observer + + setServerId: (serverId: string) => void + + constructor ( + readonly clientId: ClientT, + readonly client: BackRPCClientHandler, + readonly host: string, + readonly port: number, + readonly tickMgr: TickManager, + options?: zmq.SocketOptions + ) { + this.dealer = new zmq.Dealer(options) + this.dealer.connect(`tcp://${host}:${port}`) + + this.setServerId = () => {} + this.serverId = new Promise((resolve) => { + this.setServerId = (serverId) => { + this.serverId = serverId + resolve(serverId) + } + }) + + this.observer = new zmq.Observer(this.dealer) + this.observer.on('connect', (data) => { + void this.sendHello() + }) + void this.start() + + this.tickMgr.register(() => { + void this.checkAlive() + }, timeouts.pingInterval) + } + + async checkAlive (): Promise { + await this.dealer.send([backrpcOperations.ping, this.clientId as string, '', '']) + } + + private async sendHello (): Promise { + await this.dealer.send([backrpcOperations.hello, this.clientId as string, '', '']) + } + + private async start (): Promise { + // Read messages from clients. + for await (const msg of this.dealer) { + if (this.closed) { + return + } + try { + const [operation, reqId, payload] = [parseInt(msg[0].toString()), msg[1].toString(), msg[2]] + switch (operation) { + case backrpcOperations.hello: { + const serverUuid = reqId.toString() + if (this.serverId !== serverUuid) { + this.setServerId(serverUuid) + void this.client.onRegister?.()?.catch(err => { + console.error('Failed to register client', err) + }) + } + break + } + case backrpcOperations.request: + { + const [method, params] = JSON.parse(payload.toString()) + void this.client.requestHandler( + method, + params, + async (response: any) => { + await this.dealer.send([backrpcOperations.response, reqId, JSON.stringify(response)]) + } + ).catch(error => { + void this.dealer.send([ + backrpcOperations.responseError, + reqId, + JSON.stringify({ + message: error.message ?? '', + stack: error.stack ?? 
'' + }) + ]).catch(err2 => { + console.error('Failed to send error', err2, err2) + }) + }) + } + break + case backrpcOperations.response: { + const req = this.requests.get(reqId) + try { + req?.resolve(JSON.parse(payload.toString())) + } catch (err: any) { + console.error(err) + } + this.requests.delete(reqId) + break + } + case backrpcOperations.responseError: { + const req = this.requests.get(reqId) + try { + req?.reject(JSON.parse(payload.toString())) + } catch (err: any) { + console.error(err) + } + this.requests.delete(reqId) + break + } + case backrpcOperations.event: { + void this.client.onEvent?.(JSON.parse(payload.toString())).catch(err => { + console.error('Failed to handle event', err) + }) + break + } + } + } catch (err: any) { + console.error(err) + } + } + } + + async request(method: string, params: any): Promise { + if (this.serverId instanceof Promise) { + await this.serverId + } + return await new Promise((resolve, reject) => { + const reqId = this.clientId + '-' + this.requestCounter++ + this.requests.set(reqId, { resolve, reject }) + + void this.dealer.send([backrpcOperations.request, reqId, JSON.stringify([method, params])]).catch((err) => { + reject(err) + this.requests.delete(reqId) + }) + }) + } + + async send (body: any): Promise { + await this.dealer.send([backrpcOperations.event, body]) + } + + close (): void { + this.closed = true + this.dealer.close() + } +} diff --git a/network/zeromq/src/client.ts b/network/zeromq/src/client.ts new file mode 100644 index 00000000000..45eaac340fb --- /dev/null +++ b/network/zeromq/src/client.ts @@ -0,0 +1,276 @@ +import { + type AgentEndpointRef, + type AgentRecord, + type AgentUuid, + type ClientUuid, + type ContainerConnection, + type ContainerEndpointRef, + type ContainerEvent, + type ContainerKind, + type ContainerRecord, + type ContainerReference, + type ContainerRequest, + type ContainerUpdateListener, + type ContainerUuid, + type NetworkAgent, + type NetworkClient, + type TickManager +} from '@hcengineering/network' +import { v4 as uuidv4 } from 'uuid' +import { NetworkDirectConnectionImpl, RoutedNetworkAgentConnectionImpl } from './agent' +import { BackRPCClient, type BackRPCResponseSend } from './backrpc' +import { agentDirectRef, EndpointKind, parseEndpointRef } from './endpoints' +import { opNames } from './types' + +interface ClientAgentRecord { + agent: NetworkAgent + register: Promise + resolve: () => void +} +/** + * Huly Network client + * + * Some methods are omit clientId parameter. + */ +export class NetworkClientImpl implements NetworkClient { + clientId: ClientUuid = uuidv4() as ClientUuid + + private readonly client: BackRPCClient + + private readonly _agents = new Map() + + // A set of clients for individual containers or agent TORs + endpointConnections = new Map>() + agentConnections = new Map>() + + containerListeners: ContainerUpdateListener[] = [] + + references = new Map() + + registered: boolean = false + + constructor ( + readonly host: string, + port: number, + private readonly tickMgr: TickManager + ) { + this.client = new BackRPCClient(this.clientId, this, host, port, tickMgr) + } + + async close (): Promise { + this.client.close() + for (const agentConn of this.agentConnections.values()) { + await agentConn.close() + } + } + + async requestHandler (method: string, params: any, send: BackRPCResponseSend): Promise { + const [agentId, agentParams] = params + // Pass agent methods to a proper agent + const { agent } = this._agents.get(agentId) ?? 
{ agent: undefined } + if (agent === undefined) { + await send({ error: `Agent ${agentId} not found` }) + return + } + switch (method) { + case opNames.getContainer: + await send(await agent.get(agentParams[0], agentParams[1])) + break + case opNames.listContainers: + await send(await agent.list(agentParams[0])) + break + case opNames.sendContainer: + await send(await agent.request(agentParams[0], agentParams[1], agentParams[2])) + break + default: + throw new Error('Unknown method') + } + } + + async onEvent (event: ContainerEvent): Promise { + // Handle container events + + await this.handleConnectionUpdates(event) + + // In case of container stopped, agent stopped or endpoint changed, we need to update direct connections to be re-established. + for (const listener of this.containerListeners) { + try { + await listener(event) + } catch (error) { + console.error('Error in container listener:', error) + } + } + } + + async onRegister (): Promise { + this.registered = true + // We need to re-register all our managed agents + for (const agent of this._agents.values()) { + await this.doRegister(agent.agent) + } + } + + /** + * Register a new agent, agent could or could not provide an endpoint for routed connections. + */ + async register (agent: NetworkAgent): Promise { + const rec: ClientAgentRecord = { + agent, + register: Promise.resolve(), + resolve: () => {} + } + rec.register = new Promise((resolve) => { + rec.resolve = resolve + }) + this._agents.set(agent.uuid, rec) + + agent.onUpdate = async (event) => { + await this.client.request(opNames.containerUpdate, event) + } + agent.onAgentUpdate = async () => { + await this.doRegister(agent) + } + + if (this.registered) { + await this.doRegister(agent) + } + await rec.register + } + + async doRegister (agent: NetworkAgent): Promise { + const containers: ContainerRecord[] = [] + for (const container of await agent.list()) { + containers.push({ + agentId: agent.uuid, + uuid: container.uuid, + endpoint: container.endpoint, + kind: container.kind, + lastVisit: container.lastVisit + } satisfies ContainerRecord) + } + const toClean = await this.client.request(opNames.register, { + uuid: agent.uuid, + containers, + kinds: agent.kinds, + endpoint: agent.endpoint + }) + for (const uuid of toClean) { + await agent.terminate(uuid) + } + this._agents.get(agent.uuid)?.resolve() + } + + async agents (): Promise { + // Return actual list of agents + return await this.client.request(opNames.getAgents, {}) + } + + async kinds (): Promise { + return await this.client.request(opNames.getKinds, {}) + } + + async get (uuid: ContainerUuid, request: ContainerRequest): Promise { + const existing = this.references.get(uuid) + if (existing !== undefined) { + return existing + } + const endpoint = await this.client.request(opNames.getContainer, { + uuid, + request + }) + const ref: ContainerReference = { + uuid, + endpoint, + close: async () => { + await this.release(uuid) + this.references.delete(uuid) + }, + request: (data) => this.request(uuid, data), + connect: (timeout?: number) => this.establishConnection(endpoint, timeout) + } + this.references.set(uuid, ref) + return ref + } + + private async establishConnection (endpoint: ContainerEndpointRef, timeout?: number): Promise { + let conn = this.endpointConnections.get(endpoint) + if (conn !== undefined) { + if (conn instanceof Promise) { + conn = await conn + this.endpointConnections.set(endpoint, conn) + } + return conn + } + // Check if connection is routed + const parsedRef = parseEndpointRef(endpoint) + if 
(parsedRef.uuid === undefined) { + throw new Error('Invalid endpoint reference') + } + if (parsedRef.kind === EndpointKind.noconnect) { + throw new Error('No connection available') + } + if (parsedRef.kind === EndpointKind.routed) { + const agentRef = agentDirectRef(parsedRef.host, parsedRef.port, parsedRef.agentId) + let agentConn = this.agentConnections.get(agentRef) + if (agentConn === undefined) { + agentConn = new RoutedNetworkAgentConnectionImpl( + this.tickMgr, + this.clientId, + parsedRef.host, + parsedRef.port + ) + this.agentConnections.set(agentRef, agentConn) + } + const conn = agentConn.connect(parsedRef.uuid) + this.endpointConnections.set(endpoint, conn) + return await conn + } + const directConn = new NetworkDirectConnectionImpl( + this.tickMgr, + this.clientId, + parsedRef.uuid, + parsedRef.host, + parsedRef.port + ) + this.endpointConnections.set(endpoint, directConn) + return directConn + } + + async handleConnectionUpdates (event: ContainerEvent): Promise { + // Handle connection updates + // TODO: Fix me + for (const updated of event.updated) { + if (this.references.has(updated.uuid)) { + const ref = this.references.get(updated.uuid) + if (ref !== undefined) { + ref.endpoint = updated.endpoint + } + } + } + for (const deleted of event.deleted) { + const ref = this.references.get(deleted.uuid) + if (ref !== undefined) { + // We need to re request endpoint and update both endpoint and connection + } + } + } + + async release (uuid: ContainerUuid): Promise { + await this.client.request(opNames.releaseContainer, { uuid }) + } + + async list (kind: ContainerKind): Promise { + return await this.client.request(opNames.listContainers, { + kind + }) + } + + // Send some data to container, using proxy connection. + async request (target: ContainerUuid, operation: string, data?: any): Promise { + return await this.client.request(opNames.sendContainer, [target, operation, data]) + } + + onContainerUpdate (listener: ContainerUpdateListener): void { + this.containerListeners.push(listener) + } +} diff --git a/network/zeromq/src/endpoints.ts b/network/zeromq/src/endpoints.ts new file mode 100644 index 00000000000..b64b181ab86 --- /dev/null +++ b/network/zeromq/src/endpoints.ts @@ -0,0 +1,60 @@ +import type { AgentEndpointRef, AgentUuid, ContainerEndpointRef, ContainerUuid } from '@hcengineering/network' + +export enum EndpointKind { + routed, // Container is routed via host as router, host is BackRPCServer, and send operation should be used to pass data to target. + direct, // A direct connection to container. + noconnect // No connection to container via network, only send on network. +} +// An Agent or Container endpoint referenfe to establish a direct connection to. 
+export interface EndpointRefData { + kind: EndpointKind + host: string + port: number + agentId: AgentUuid + uuid?: ContainerUuid +} + +export function agentDirectRef (host: string, port: number, uuid: AgentUuid): AgentEndpointRef { + return JSON.stringify({ + host, + port, + kind: EndpointKind.direct, + agentId: uuid + } satisfies EndpointRefData) as AgentEndpointRef +} + +export function agentNoConnectRef (uuid: AgentUuid): AgentEndpointRef { + return JSON.stringify({ + kind: EndpointKind.noconnect, + agentId: uuid, + host: '', + port: 0 + } satisfies EndpointRefData) as AgentEndpointRef +} + +export function containerOnAgentEndpointRef ( + agentEndpoint: AgentEndpointRef, + container: ContainerUuid +): ContainerEndpointRef { + const agentData = JSON.parse(agentEndpoint) as EndpointRefData + return JSON.stringify({ + kind: agentData.kind === EndpointKind.noconnect ? agentData.kind : EndpointKind.routed, + host: agentData.host, + port: agentData.port, + agentId: agentData.agentId, + uuid: container + } satisfies EndpointRefData) as ContainerEndpointRef +} + +export function parseEndpointRef (ref: AgentEndpointRef | ContainerEndpointRef): EndpointRefData { + return JSON.parse(ref) as EndpointRefData +} +export function containerDirectRef (host: string, port: number, uuid: ContainerUuid, agentId: AgentUuid): ContainerEndpointRef { + return JSON.stringify({ + kind: EndpointKind.direct, + host, + port, + uuid, + agentId + } satisfies EndpointRefData) as ContainerEndpointRef +} diff --git a/network/zeromq/src/index.ts b/network/zeromq/src/index.ts new file mode 100644 index 00000000000..fd7bbe7dba9 --- /dev/null +++ b/network/zeromq/src/index.ts @@ -0,0 +1,3 @@ +export * from './types' +export * from './client' +export * from './server' diff --git a/network/zeromq/src/server.ts b/network/zeromq/src/server.ts new file mode 100644 index 00000000000..0bd2e43d53b --- /dev/null +++ b/network/zeromq/src/server.ts @@ -0,0 +1,144 @@ +import { + type AgentEndpointRef, + type AgentUuid, + type ClientUuid, + type Container, + type ContainerEndpointRef, + type ContainerKind, + type ContainerRecord, + type ContainerRequest, + type ContainerUuid, + type Network, + type NetworkAgent, + type NetworkWithClients, + type TickManager +} from '@hcengineering/network' +import { BackRPCServer, type BackRPCResponseSend, type BackRPCServerHandler } from './backrpc' +import { opNames } from './types' + +class AgentCallbackHandler implements NetworkAgent { + constructor ( + readonly rpcServer: BackRPCServer, + readonly uuid: AgentUuid, + readonly endpoint: AgentEndpointRef, + readonly kinds: ContainerKind[], + readonly client: ClientUuid + ) {} + + async get (uuid: ContainerUuid, request: ContainerRequest): Promise { + return await this.rpcServer.request(this.client, opNames.getContainer, [this.uuid, [uuid, request]]) + } + + async list (kind?: ContainerKind): Promise { + return await this.rpcServer.request(this.client, opNames.listContainers, [this.uuid, [kind]]) + } + + async request (target: ContainerUuid, operation: string, data?: any): Promise { + return await this.rpcServer.request(this.client, opNames.sendContainer, [this.uuid, [target, operation, data]]) + } + + async terminate (): Promise { + // Ignore + } + + async getContainer (uuid: ContainerUuid): Promise { + return undefined + } +} + +export class NetworkServer implements BackRPCServerHandler { + rpcServer: BackRPCServer + constructor ( + readonly network: Network & NetworkWithClients, + readonly tickMgr: TickManager, + host: string = '*', + port: number 
= 3737
+  ) {
+    this.rpcServer = new BackRPCServer(this, tickMgr, host, port)
+  }
+
+  async close (): Promise<void> {
+    this.rpcServer.close()
+  }
+
+  async requestHandler (client: ClientUuid, method: string, params: any, send: BackRPCResponseSend): Promise<void> {
+    switch (method) {
+      case opNames.register: {
+        // Handle register
+        await this.handleRegister(params, this.rpcServer, client, send)
+        break
+      }
+      case opNames.getAgents: {
+        await send(await this.network.agents())
+        break
+      }
+      case opNames.getKinds: {
+        await send(await this.network.kinds())
+        break
+      }
+      case opNames.getContainer: {
+        const uuid: ContainerUuid = params.uuid
+        const request: ContainerRequest = params.request
+        await send(await this.network.get(client, uuid, request))
+        break
+      }
+      case opNames.releaseContainer: {
+        const uuid: ContainerUuid = params.uuid
+        await this.network.release(client, uuid)
+        await send('ok')
+        break
+      }
+      case opNames.listContainers: {
+        const kind: ContainerKind = params.kind
+        await send(await this.network.list(kind))
+        break
+      }
+      case opNames.sendContainer: {
+        const target: ContainerUuid = params[0]
+        const operation: string = params[1]
+        const data: any = params[2]
+        await send(await this.network.request(target, operation, data))
+        break
+      }
+
+      default:
+        throw new Error('Unknown method: ' + method)
+    }
+  }
+
+  async helloHandler (clientId: ClientUuid): Promise<void> {
+    console.log(`Client ${clientId} connected`)
+    this.network.addClient(clientId, async (event) => {
+      console.log(`Client ${clientId} received container event:`, event)
+
+      await this.rpcServer.send(clientId, event)
+    })
+  }
+
+  async handleTimeout (client: ClientUuid): Promise<void> {
+    console.log(`Client ${client} timed out`)
+    this.network.removeClient(client)
+  }
+
+  private async handleRegister (
+    params: any,
+    server: BackRPCServer,
+    client: ClientUuid,
+    send: (response: any) => Promise<void>
+  ): Promise<void> {
+    const agentUuid: AgentUuid = params.uuid
+    const containers: ContainerRecord[] = params.containers
+    const kinds: ContainerKind[] = params.kinds
+    const endpoint: AgentEndpointRef = params.endpoint
+    const res = await this.network.register(
+      {
+        agentId: agentUuid,
+        containers,
+        kinds,
+        endpoint
+      },
+      new AgentCallbackHandler(server, agentUuid, endpoint, kinds, client)
+    )
+    await send(res)
+  }
+}
diff --git a/network/zeromq/src/types.ts b/network/zeromq/src/types.ts
new file mode 100644
index 00000000000..63eafa5931e
--- /dev/null
+++ b/network/zeromq/src/types.ts
@@ -0,0 +1,16 @@
+export const opNames = {
+  // NetworkOperations
+  register: 'r',
+  unregister: 'u',
+  getAgents: 'a',
+  getKinds: 'k',
+  listContainers: 'l',
+  getContainer: 'g',
+  releaseContainer: 'rl',
+  sendContainer: 's',
+  // Agent operations
+  containerUpdate: 'c',
+  terminate: 't',
+  connect: 'c!',
+  disconnect: 'd'
+}
diff --git a/network/zeromq/tsconfig.json b/network/zeromq/tsconfig.json
new file mode 100644
index 00000000000..c6a877cf6c3
--- /dev/null
+++ b/network/zeromq/tsconfig.json
@@ -0,0 +1,12 @@
+{
+  "extends": "./node_modules/@hcengineering/platform-rig/profiles/node/tsconfig.json",
+
+  "compilerOptions": {
+    "rootDir": "./src",
+    "outDir": "./lib",
+    "declarationDir": "./types",
+    "tsBuildInfoFile": ".build/build.tsbuildinfo"
+  },
+  "include": ["src/**/*"],
+  "exclude": ["node_modules", "lib", "dist", "types", "bundle"]
+}
\ No newline at end of file
diff --git a/rush.json b/rush.json index 33787b5ff3e..63794dbc864 100644 --- a/rush.json +++ b/rush.json @@ -2663,6 +2663,15 @@ "projectFolder": "models/billing", "shouldPublish": false }, +
{ + "packageName": "@hcengineering/network", + "projectFolder": "network/core", + "shouldPublish": false + }, + { + "packageName": "@hcengineering/network-zeromq", + "projectFolder": "network/zeromq" + }, { "packageName": "@hcengineering/pod-process", "projectFolder": "services/process",