From f06749f3af75bb861176952313832f0c64d42906 Mon Sep 17 00:00:00 2001 From: benface Date: Fri, 13 Dec 2024 19:05:52 -0500 Subject: [PATCH 1/2] Update dependencies + add `fix-pages-structure` script and run it before build --- package.json | 16 +- packages/nextra-theme/package.json | 18 +- .../nextra-theme/src/layout/MDXLayoutNav.tsx | 4 + packages/og-image/package.json | 8 +- pnpm-lock.yaml | 7874 +++++++++-------- website/package.json | 23 +- website/pages/ar/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - website/pages/ar/deploying/hosted-service.mdx | 62 - .../pages/ar/deploying/subgraph-studio.mdx | 89 - .../ar/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ar/developing/graph-ts/_meta.js | 5 + .../ar/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ar/mips-faqs.mdx | 127 - website/pages/ar/network/_meta.js | 1 - website/pages/ar/querying/_meta.js | 1 - .../pages/ar/querying/graph-client/_meta.js | 5 + website/pages/cs/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/cs/deploying/hosted-service.mdx | 62 - .../pages/cs/deploying/subgraph-studio.mdx | 89 - .../cs/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/cs/developing/graph-ts/_meta.js | 5 + .../cs/managing/deprecate-a-subgraph.mdx | 24 - website/pages/cs/mips-faqs.mdx | 127 - website/pages/cs/querying/_meta.js | 1 - .../pages/cs/querying/graph-client/_meta.js | 5 + website/pages/de/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/de/deploying/hosted-service.mdx | 62 - .../pages/de/deploying/subgraph-studio.mdx | 89 - .../de/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/de/developing/graph-ts/_meta.js | 5 + .../de/managing/deprecate-a-subgraph.mdx | 24 
- website/pages/de/mips-faqs.mdx | 127 - website/pages/de/querying/_meta.js | 1 - .../pages/de/querying/graph-client/_meta.js | 5 + website/pages/en/network/_meta.js | 1 + website/pages/en/publishing/_meta.js | 4 +- website/pages/en/release-notes/_meta.js | 5 +- website/pages/es/_meta.js | 19 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/es/deploying/hosted-service.mdx | 62 - .../pages/es/deploying/subgraph-studio.mdx | 89 - .../es/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/es/developing/graph-ts/_meta.js | 5 + .../es/managing/deprecate-a-subgraph.mdx | 24 - website/pages/es/mips-faqs.mdx | 127 - website/pages/es/network/_meta.js | 1 - website/pages/es/querying/_meta.js | 1 - .../pages/es/querying/graph-client/_meta.js | 5 + website/pages/fr/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/fr/deploying/hosted-service.mdx | 62 - .../pages/fr/deploying/subgraph-studio.mdx | 89 - .../fr/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/fr/developing/graph-ts/_meta.js | 5 + .../fr/developing/unit-testing-framework.mdx | 1 - .../fr/managing/deprecate-a-subgraph.mdx | 24 - website/pages/fr/mips-faqs.mdx | 127 - website/pages/fr/querying/_meta.js | 1 - .../pages/fr/querying/graph-client/_meta.js | 5 + website/pages/ha/_meta.js | 2 +- website/pages/ha/cookbook/avoid-eth-calls.mdx | 116 + website/pages/ha/cookbook/derivedfrom.mdx | 87 + ...pi-keys-using-nextjs-server-components.mdx | 123 + .../immutable-entities-bytes-as-ids.mdx | 190 + website/pages/ha/cookbook/pruning.mdx | 55 + .../deploying-a-subgraph-to-hosted.mdx | 291 - .../deploying-a-subgraph-to-studio.mdx | 70 - website/pages/ha/deploying/hosted-service.mdx | 62 - .../pages/ha/deploying/subgraph-studio.mdx | 89 - 
.../ha/developing/creating-a-subgraph.mdx | 1236 --- .../developing/creating-a-subgraph/_meta.js | 5 + .../creating-a-subgraph/advanced.mdx | 555 ++ .../assemblyscript-mappings.mdx | 113 + .../creating-a-subgraph/install-the-cli.mdx | 119 + .../creating-a-subgraph/ql-schema.mdx | 312 + .../starting-your-subgraph.mdx | 21 + .../creating-a-subgraph/subgraph-manifest.mdx | 534 ++ website/pages/ha/developing/graph-ts/_meta.js | 5 + .../ha/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ha/mips-faqs.mdx | 127 - website/pages/ha/network/contracts.mdx | 29 + website/pages/ha/querying/_meta.js | 1 - .../pages/ha/querying/graph-client/_meta.js | 5 + website/pages/hi/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/hi/deploying/hosted-service.mdx | 62 - .../pages/hi/deploying/subgraph-studio.mdx | 89 - .../hi/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/hi/developing/graph-ts/_meta.js | 5 + .../hi/managing/deprecate-a-subgraph.mdx | 24 - website/pages/hi/mips-faqs.mdx | 127 - website/pages/hi/querying/_meta.js | 1 - .../pages/hi/querying/graph-client/_meta.js | 5 + website/pages/it/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/it/deploying/hosted-service.mdx | 62 - .../pages/it/deploying/subgraph-studio.mdx | 89 - .../it/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/it/developing/graph-ts/_meta.js | 5 + .../it/managing/deprecate-a-subgraph.mdx | 24 - website/pages/it/mips-faqs.mdx | 127 - website/pages/it/querying/_meta.js | 1 - .../pages/it/querying/graph-client/_meta.js | 5 + website/pages/ja/_meta.js | 18 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/ja/deploying/hosted-service.mdx | 62 - 
.../pages/ja/deploying/subgraph-studio.mdx | 89 - .../ja/developing/creating-a-subgraph.mdx | 1602 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ja/developing/graph-ts/_meta.js | 5 + .../ja/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ja/mips-faqs.mdx | 127 - website/pages/ja/network/_meta.js | 1 - website/pages/ja/querying/_meta.js | 1 - .../pages/ja/querying/graph-client/_meta.js | 5 + website/pages/ko/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/ko/deploying/hosted-service.mdx | 62 - .../pages/ko/deploying/subgraph-studio.mdx | 89 - .../ko/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ko/developing/graph-ts/_meta.js | 5 + .../ko/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ko/mips-faqs.mdx | 127 - website/pages/ko/network/_meta.js | 1 - website/pages/ko/querying/_meta.js | 1 - .../pages/ko/querying/graph-client/_meta.js | 5 + website/pages/mr/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/mr/deploying/hosted-service.mdx | 62 - .../pages/mr/deploying/subgraph-studio.mdx | 89 - .../mr/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/mr/developing/graph-ts/_meta.js | 5 + .../mr/managing/deprecate-a-subgraph.mdx | 24 - website/pages/mr/mips-faqs.mdx | 127 - website/pages/mr/querying/_meta.js | 1 - .../pages/mr/querying/graph-client/_meta.js | 5 + website/pages/nl/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/nl/deploying/hosted-service.mdx | 62 - .../pages/nl/deploying/subgraph-studio.mdx | 89 - .../nl/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/nl/developing/graph-ts/_meta.js | 5 + 
.../nl/managing/deprecate-a-subgraph.mdx | 24 - website/pages/nl/mips-faqs.mdx | 127 - website/pages/nl/querying/_meta.js | 1 - .../pages/nl/querying/graph-client/_meta.js | 5 + website/pages/pl/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/pl/deploying/hosted-service.mdx | 62 - .../pages/pl/deploying/subgraph-studio.mdx | 89 - .../pl/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/pl/developing/graph-ts/_meta.js | 5 + .../pl/managing/deprecate-a-subgraph.mdx | 24 - website/pages/pl/mips-faqs.mdx | 127 - website/pages/pl/querying/_meta.js | 1 - .../pages/pl/querying/graph-client/_meta.js | 5 + website/pages/pt/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/pt/deploying/hosted-service.mdx | 62 - .../pages/pt/deploying/subgraph-studio.mdx | 89 - .../pt/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/pt/developing/graph-ts/_meta.js | 5 + .../pt/managing/deprecate-a-subgraph.mdx | 24 - website/pages/pt/mips-faqs.mdx | 127 - website/pages/pt/querying/_meta.js | 1 - .../pages/pt/querying/graph-client/_meta.js | 5 + website/pages/ro/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/ro/deploying/hosted-service.mdx | 62 - .../pages/ro/deploying/subgraph-studio.mdx | 89 - .../ro/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ro/developing/graph-ts/_meta.js | 5 + .../ro/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ro/mips-faqs.mdx | 127 - website/pages/ro/querying/_meta.js | 1 - .../pages/ro/querying/graph-client/_meta.js | 5 + website/pages/ru/_meta.js | 3 +- .../deploying-a-subgraph-to-hosted.mdx | 293 - .../deploying-a-subgraph-to-studio.mdx | 68 - 
website/pages/ru/deploying/hosted-service.mdx | 62 - .../pages/ru/deploying/subgraph-studio.mdx | 89 - .../ru/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ru/developing/graph-ts/_meta.js | 5 + .../ru/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ru/mips-faqs.mdx | 127 - website/pages/ru/querying/_meta.js | 1 - .../pages/ru/querying/graph-client/_meta.js | 5 + website/pages/sv/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/sv/deploying/hosted-service.mdx | 62 - .../pages/sv/deploying/subgraph-studio.mdx | 89 - .../sv/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/sv/developing/graph-ts/_meta.js | 5 + .../sv/managing/deprecate-a-subgraph.mdx | 24 - website/pages/sv/mips-faqs.mdx | 127 - website/pages/sv/querying/_meta.js | 1 - .../pages/sv/querying/graph-client/_meta.js | 5 + website/pages/sw/_meta.js | 5 + website/pages/sw/arbitrum/_meta.js | 5 + website/pages/sw/cookbook/_meta.js | 5 + website/pages/sw/deploying/_meta.js | 5 + website/pages/sw/developing/_meta.js | 5 + .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/sw/developing/graph-ts/_meta.js | 5 + website/pages/sw/managing/_meta.js | 5 + website/pages/sw/network/_meta.js | 5 + website/pages/sw/publishing/_meta.js | 5 + website/pages/sw/querying/_meta.js | 5 + .../pages/sw/querying/graph-client/_meta.js | 5 + website/pages/sw/release-notes/_meta.js | 5 + website/pages/sw/sps/_meta.js | 5 + website/pages/sw/translations.ts | 13 + website/pages/tr/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/tr/deploying/hosted-service.mdx | 62 - .../pages/tr/deploying/subgraph-studio.mdx | 89 - .../tr/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + 
website/pages/tr/developing/graph-ts/_meta.js | 5 + .../tr/managing/deprecate-a-subgraph.mdx | 24 - website/pages/tr/mips-faqs.mdx | 127 - website/pages/tr/querying/_meta.js | 1 - .../pages/tr/querying/graph-client/_meta.js | 5 + website/pages/uk/_meta.js | 18 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/uk/deploying/hosted-service.mdx | 62 - .../pages/uk/deploying/subgraph-studio.mdx | 89 - .../uk/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/uk/developing/graph-ts/_meta.js | 5 + .../uk/managing/deprecate-a-subgraph.mdx | 24 - website/pages/uk/mips-faqs.mdx | 127 - website/pages/uk/network/_meta.js | 1 - website/pages/uk/querying/_meta.js | 1 - .../pages/uk/querying/graph-client/_meta.js | 5 + website/pages/ur/_meta.js | 18 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/ur/deploying/hosted-service.mdx | 62 - .../pages/ur/deploying/subgraph-studio.mdx | 89 - .../ur/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/ur/developing/graph-ts/_meta.js | 5 + .../ur/managing/deprecate-a-subgraph.mdx | 24 - website/pages/ur/mips-faqs.mdx | 127 - website/pages/ur/network/_meta.js | 2 - website/pages/ur/querying/_meta.js | 1 - .../pages/ur/querying/graph-client/_meta.js | 5 + website/pages/vi/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/vi/deploying/hosted-service.mdx | 62 - .../pages/vi/deploying/subgraph-studio.mdx | 89 - .../vi/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/vi/developing/graph-ts/_meta.js | 5 + .../vi/managing/deprecate-a-subgraph.mdx | 24 - website/pages/vi/mips-faqs.mdx | 127 - website/pages/vi/network/_meta.js | 1 - website/pages/vi/querying/_meta.js | 1 - 
.../pages/vi/querying/graph-client/_meta.js | 5 + website/pages/yo/_meta.js | 2 +- .../deploying-a-subgraph-to-hosted.mdx | 295 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/yo/deploying/hosted-service.mdx | 62 - .../pages/yo/deploying/subgraph-studio.mdx | 89 - .../yo/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + .../creating-a-subgraph/advanced.mdx | 555 ++ .../assemblyscript-mappings.mdx | 113 + .../creating-a-subgraph/install-the-cli.mdx | 119 + .../creating-a-subgraph/ql-schema.mdx | 312 + .../starting-your-subgraph.mdx | 21 + .../creating-a-subgraph/subgraph-manifest.mdx | 534 ++ website/pages/yo/developing/graph-ts/_meta.js | 5 + .../yo/managing/deprecate-a-subgraph.mdx | 24 - website/pages/yo/mips-faqs.mdx | 127 - website/pages/yo/querying/_meta.js | 1 - .../pages/yo/querying/graph-client/_meta.js | 5 + website/pages/zh/_meta.js | 18 +- .../deploying-a-subgraph-to-hosted.mdx | 297 - .../deploying-a-subgraph-to-studio.mdx | 68 - website/pages/zh/deploying/hosted-service.mdx | 62 - .../pages/zh/deploying/subgraph-studio.mdx | 89 - .../zh/developing/creating-a-subgraph.mdx | 1601 ---- .../developing/creating-a-subgraph/_meta.js | 5 + website/pages/zh/developing/graph-ts/_meta.js | 5 + .../zh/managing/deprecate-a-subgraph.mdx | 24 - website/pages/zh/mips-faqs.mdx | 127 - website/pages/zh/network/_meta.js | 1 - website/pages/zh/querying/_meta.js | 1 - .../pages/zh/querying/graph-client/_meta.js | 5 + website/route-lockfile.txt | 178 +- website/scripts/fix-pages-structure.ts | 159 + 328 files changed, 8566 insertions(+), 55905 deletions(-) delete mode 100644 website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ar/deploying/hosted-service.mdx delete mode 100644 website/pages/ar/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ar/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ar/developing/creating-a-subgraph/_meta.js 
create mode 100644 website/pages/ar/developing/graph-ts/_meta.js delete mode 100644 website/pages/ar/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ar/mips-faqs.mdx create mode 100644 website/pages/ar/querying/graph-client/_meta.js delete mode 100644 website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/cs/deploying/hosted-service.mdx delete mode 100644 website/pages/cs/deploying/subgraph-studio.mdx delete mode 100644 website/pages/cs/developing/creating-a-subgraph.mdx create mode 100644 website/pages/cs/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/cs/developing/graph-ts/_meta.js delete mode 100644 website/pages/cs/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/cs/mips-faqs.mdx create mode 100644 website/pages/cs/querying/graph-client/_meta.js delete mode 100644 website/pages/de/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/de/deploying/hosted-service.mdx delete mode 100644 website/pages/de/deploying/subgraph-studio.mdx delete mode 100644 website/pages/de/developing/creating-a-subgraph.mdx create mode 100644 website/pages/de/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/de/developing/graph-ts/_meta.js delete mode 100644 website/pages/de/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/de/mips-faqs.mdx create mode 100644 website/pages/de/querying/graph-client/_meta.js delete mode 100644 website/pages/es/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/es/deploying/hosted-service.mdx delete mode 100644 website/pages/es/deploying/subgraph-studio.mdx delete mode 100644 website/pages/es/developing/creating-a-subgraph.mdx 
create mode 100644 website/pages/es/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/es/developing/graph-ts/_meta.js delete mode 100644 website/pages/es/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/es/mips-faqs.mdx create mode 100644 website/pages/es/querying/graph-client/_meta.js delete mode 100644 website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/fr/deploying/hosted-service.mdx delete mode 100644 website/pages/fr/deploying/subgraph-studio.mdx delete mode 100644 website/pages/fr/developing/creating-a-subgraph.mdx create mode 100644 website/pages/fr/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/fr/developing/graph-ts/_meta.js delete mode 100644 website/pages/fr/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/fr/mips-faqs.mdx create mode 100644 website/pages/fr/querying/graph-client/_meta.js create mode 100644 website/pages/ha/cookbook/avoid-eth-calls.mdx create mode 100644 website/pages/ha/cookbook/derivedfrom.mdx create mode 100644 website/pages/ha/cookbook/how-to-secure-api-keys-using-nextjs-server-components.mdx create mode 100644 website/pages/ha/cookbook/immutable-entities-bytes-as-ids.mdx create mode 100644 website/pages/ha/cookbook/pruning.mdx delete mode 100644 website/pages/ha/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ha/deploying/hosted-service.mdx delete mode 100644 website/pages/ha/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ha/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ha/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ha/developing/creating-a-subgraph/advanced.mdx create mode 100644 
website/pages/ha/developing/creating-a-subgraph/assemblyscript-mappings.mdx create mode 100644 website/pages/ha/developing/creating-a-subgraph/install-the-cli.mdx create mode 100644 website/pages/ha/developing/creating-a-subgraph/ql-schema.mdx create mode 100644 website/pages/ha/developing/creating-a-subgraph/starting-your-subgraph.mdx create mode 100644 website/pages/ha/developing/creating-a-subgraph/subgraph-manifest.mdx create mode 100644 website/pages/ha/developing/graph-ts/_meta.js delete mode 100644 website/pages/ha/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ha/mips-faqs.mdx create mode 100644 website/pages/ha/network/contracts.mdx create mode 100644 website/pages/ha/querying/graph-client/_meta.js delete mode 100644 website/pages/hi/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/hi/deploying/hosted-service.mdx delete mode 100644 website/pages/hi/deploying/subgraph-studio.mdx delete mode 100644 website/pages/hi/developing/creating-a-subgraph.mdx create mode 100644 website/pages/hi/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/hi/developing/graph-ts/_meta.js delete mode 100644 website/pages/hi/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/hi/mips-faqs.mdx create mode 100644 website/pages/hi/querying/graph-client/_meta.js delete mode 100644 website/pages/it/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/it/deploying/hosted-service.mdx delete mode 100644 website/pages/it/deploying/subgraph-studio.mdx delete mode 100644 website/pages/it/developing/creating-a-subgraph.mdx create mode 100644 website/pages/it/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/it/developing/graph-ts/_meta.js delete mode 100644 
website/pages/it/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/it/mips-faqs.mdx create mode 100644 website/pages/it/querying/graph-client/_meta.js delete mode 100644 website/pages/ja/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ja/deploying/hosted-service.mdx delete mode 100644 website/pages/ja/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ja/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ja/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ja/developing/graph-ts/_meta.js delete mode 100644 website/pages/ja/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ja/mips-faqs.mdx create mode 100644 website/pages/ja/querying/graph-client/_meta.js delete mode 100644 website/pages/ko/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ko/deploying/hosted-service.mdx delete mode 100644 website/pages/ko/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ko/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ko/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ko/developing/graph-ts/_meta.js delete mode 100644 website/pages/ko/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ko/mips-faqs.mdx create mode 100644 website/pages/ko/querying/graph-client/_meta.js delete mode 100644 website/pages/mr/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/mr/deploying/hosted-service.mdx delete mode 100644 website/pages/mr/deploying/subgraph-studio.mdx delete mode 100644 website/pages/mr/developing/creating-a-subgraph.mdx create mode 100644 website/pages/mr/developing/creating-a-subgraph/_meta.js create 
mode 100644 website/pages/mr/developing/graph-ts/_meta.js delete mode 100644 website/pages/mr/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/mr/mips-faqs.mdx create mode 100644 website/pages/mr/querying/graph-client/_meta.js delete mode 100644 website/pages/nl/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/nl/deploying/hosted-service.mdx delete mode 100644 website/pages/nl/deploying/subgraph-studio.mdx delete mode 100644 website/pages/nl/developing/creating-a-subgraph.mdx create mode 100644 website/pages/nl/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/nl/developing/graph-ts/_meta.js delete mode 100644 website/pages/nl/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/nl/mips-faqs.mdx create mode 100644 website/pages/nl/querying/graph-client/_meta.js delete mode 100644 website/pages/pl/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/pl/deploying/hosted-service.mdx delete mode 100644 website/pages/pl/deploying/subgraph-studio.mdx delete mode 100644 website/pages/pl/developing/creating-a-subgraph.mdx create mode 100644 website/pages/pl/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/pl/developing/graph-ts/_meta.js delete mode 100644 website/pages/pl/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/pl/mips-faqs.mdx create mode 100644 website/pages/pl/querying/graph-client/_meta.js delete mode 100644 website/pages/pt/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/pt/deploying/hosted-service.mdx delete mode 100644 website/pages/pt/deploying/subgraph-studio.mdx delete mode 100644 website/pages/pt/developing/creating-a-subgraph.mdx create 
mode 100644 website/pages/pt/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/pt/developing/graph-ts/_meta.js delete mode 100644 website/pages/pt/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/pt/mips-faqs.mdx create mode 100644 website/pages/pt/querying/graph-client/_meta.js delete mode 100644 website/pages/ro/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ro/deploying/hosted-service.mdx delete mode 100644 website/pages/ro/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ro/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ro/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ro/developing/graph-ts/_meta.js delete mode 100644 website/pages/ro/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ro/mips-faqs.mdx create mode 100644 website/pages/ro/querying/graph-client/_meta.js delete mode 100644 website/pages/ru/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ru/deploying/hosted-service.mdx delete mode 100644 website/pages/ru/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ru/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ru/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ru/developing/graph-ts/_meta.js delete mode 100644 website/pages/ru/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ru/mips-faqs.mdx create mode 100644 website/pages/ru/querying/graph-client/_meta.js delete mode 100644 website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/sv/deploying/hosted-service.mdx delete mode 100644 website/pages/sv/deploying/subgraph-studio.mdx 
delete mode 100644 website/pages/sv/developing/creating-a-subgraph.mdx create mode 100644 website/pages/sv/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/sv/developing/graph-ts/_meta.js delete mode 100644 website/pages/sv/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/sv/mips-faqs.mdx create mode 100644 website/pages/sv/querying/graph-client/_meta.js create mode 100644 website/pages/sw/_meta.js create mode 100644 website/pages/sw/arbitrum/_meta.js create mode 100644 website/pages/sw/cookbook/_meta.js create mode 100644 website/pages/sw/deploying/_meta.js create mode 100644 website/pages/sw/developing/_meta.js create mode 100644 website/pages/sw/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/sw/developing/graph-ts/_meta.js create mode 100644 website/pages/sw/managing/_meta.js create mode 100644 website/pages/sw/network/_meta.js create mode 100644 website/pages/sw/publishing/_meta.js create mode 100644 website/pages/sw/querying/_meta.js create mode 100644 website/pages/sw/querying/graph-client/_meta.js create mode 100644 website/pages/sw/release-notes/_meta.js create mode 100644 website/pages/sw/sps/_meta.js create mode 100644 website/pages/sw/translations.ts delete mode 100644 website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/tr/deploying/hosted-service.mdx delete mode 100644 website/pages/tr/deploying/subgraph-studio.mdx delete mode 100644 website/pages/tr/developing/creating-a-subgraph.mdx create mode 100644 website/pages/tr/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/tr/developing/graph-ts/_meta.js delete mode 100644 website/pages/tr/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/tr/mips-faqs.mdx create mode 100644 website/pages/tr/querying/graph-client/_meta.js delete mode 100644 
website/pages/uk/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/uk/deploying/hosted-service.mdx delete mode 100644 website/pages/uk/deploying/subgraph-studio.mdx delete mode 100644 website/pages/uk/developing/creating-a-subgraph.mdx create mode 100644 website/pages/uk/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/uk/developing/graph-ts/_meta.js delete mode 100644 website/pages/uk/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/uk/mips-faqs.mdx create mode 100644 website/pages/uk/querying/graph-client/_meta.js delete mode 100644 website/pages/ur/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/ur/deploying/hosted-service.mdx delete mode 100644 website/pages/ur/deploying/subgraph-studio.mdx delete mode 100644 website/pages/ur/developing/creating-a-subgraph.mdx create mode 100644 website/pages/ur/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/ur/developing/graph-ts/_meta.js delete mode 100644 website/pages/ur/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/ur/mips-faqs.mdx create mode 100644 website/pages/ur/querying/graph-client/_meta.js delete mode 100644 website/pages/vi/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/vi/deploying/hosted-service.mdx delete mode 100644 website/pages/vi/deploying/subgraph-studio.mdx delete mode 100644 website/pages/vi/developing/creating-a-subgraph.mdx create mode 100644 website/pages/vi/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/vi/developing/graph-ts/_meta.js delete mode 100644 website/pages/vi/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/vi/mips-faqs.mdx create 
mode 100644 website/pages/vi/querying/graph-client/_meta.js delete mode 100644 website/pages/yo/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/yo/deploying/hosted-service.mdx delete mode 100644 website/pages/yo/deploying/subgraph-studio.mdx delete mode 100644 website/pages/yo/developing/creating-a-subgraph.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/yo/developing/creating-a-subgraph/advanced.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/assemblyscript-mappings.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/install-the-cli.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/ql-schema.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/starting-your-subgraph.mdx create mode 100644 website/pages/yo/developing/creating-a-subgraph/subgraph-manifest.mdx create mode 100644 website/pages/yo/developing/graph-ts/_meta.js delete mode 100644 website/pages/yo/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/yo/mips-faqs.mdx create mode 100644 website/pages/yo/querying/graph-client/_meta.js delete mode 100644 website/pages/zh/deploying/deploying-a-subgraph-to-hosted.mdx delete mode 100644 website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx delete mode 100644 website/pages/zh/deploying/hosted-service.mdx delete mode 100644 website/pages/zh/deploying/subgraph-studio.mdx delete mode 100644 website/pages/zh/developing/creating-a-subgraph.mdx create mode 100644 website/pages/zh/developing/creating-a-subgraph/_meta.js create mode 100644 website/pages/zh/developing/graph-ts/_meta.js delete mode 100644 website/pages/zh/managing/deprecate-a-subgraph.mdx delete mode 100644 website/pages/zh/mips-faqs.mdx create mode 100644 website/pages/zh/querying/graph-client/_meta.js create mode 100644 
website/scripts/fix-pages-structure.ts diff --git a/package.json b/package.json index ce1ccbd836d1..5c0736904ac0 100644 --- a/package.json +++ b/package.json @@ -1,27 +1,27 @@ { "name": "the-graph-docs-monorepo", "private": true, - "packageManager": "pnpm@9.13.2", + "packageManager": "pnpm@9.15.0", "scripts": { - "build": "NODE_OPTIONS='--max_old_space_size=4096' turbo run build", - "check": "pnpm typecheck && pnpm lint && pnpm prettier:check", - "check:fix": "pnpm typecheck; pnpm lint:fix", "dev": "turbo run dev --parallel", + "build": "NODE_OPTIONS='--max_old_space_size=4096' turbo run build", "docker:build": "source ./website/.env.local && DOCKER_BUILDKIT=1 docker build . -t docs --no-cache --build-arg ENVIRONMENT=$ENVIRONMENT", "docker:clean": "docker builder prune", "docker:up": "docker run --rm -it -p 3000:80 -v \"$(pwd)/nginx.conf:/etc/nginx/nginx.conf\" docs", + "check": "pnpm typecheck && pnpm lint && pnpm prettier:check", + "check:fix": "pnpm typecheck; pnpm lint:fix", + "typecheck": "turbo run typecheck", "lint": "eslint . --cache --ignore-path .gitignore --max-warnings 0", "lint:fix": "eslint . 
--cache --ignore-path .gitignore --fix; pnpm prettier", "prettier": "pnpm prettier:check --write", "prettier:check": "prettier --cache --check .", - "test": "turbo run test", - "typecheck": "turbo run typecheck" + "test": "turbo run test" }, "devDependencies": { "@edgeandnode/eslint-config": "^2.0.3", "eslint": "^8.57.1", "eslint-plugin-mdx": "^2.3.4", - "prettier": "^3.3.3", + "prettier": "^3.4.2", "prettier-plugin-tailwindcss": "^0.6.9", "remark-frontmatter": "^5.0.0", "remark-lint-first-heading-level": "^3.1.2", @@ -29,6 +29,6 @@ "remark-lint-no-heading-punctuation": "^3.1.2", "remark-lint-restrict-elements": "workspace:*", "turbo": "^1.13.4", - "typescript": "^5.6.3" + "typescript": "^5.7.2" } } diff --git a/packages/nextra-theme/package.json b/packages/nextra-theme/package.json index 226b91c8ace0..c8ebe64383a8 100644 --- a/packages/nextra-theme/package.json +++ b/packages/nextra-theme/package.json @@ -36,20 +36,20 @@ }, "dependencies": { "@docsearch/react": "^3.8.0", - "@radix-ui/react-collapsible": "^1.1.1", - "@radix-ui/react-visually-hidden": "^1.1.0", + "@radix-ui/react-collapsible": "^1.1.2", + "@radix-ui/react-visually-hidden": "^1.1.1", "lodash": "^4.17.21", "react-intersection-observer": "^9.13.1", - "react-use": "^17.5.1" + "react-use": "^17.6.0" }, "devDependencies": { - "@edgeandnode/gds": "~5.34.0", - "@edgeandnode/go": "~6.60.0", - "@emotion/react": "^11.13.3", + "@edgeandnode/gds": "^5.39.1", + "@edgeandnode/go": "^6.74.0", + "@emotion/react": "^11.14.0", "@types/lodash": "^4.17.13", - "@types/react": "^18.3.12", - "@types/react-dom": "^18.3.1", - "next": "^14.2.18", + "@types/react": "^18.3.16", + "@types/react-dom": "^18.3.5", + "next": "^14.2.20", "next-seo": "^6.6.0", "nextra": "^2.13.4", "react": "^18.3.1", diff --git a/packages/nextra-theme/src/layout/MDXLayoutNav.tsx b/packages/nextra-theme/src/layout/MDXLayoutNav.tsx index b0cd21686b7b..535c8de51252 100644 --- a/packages/nextra-theme/src/layout/MDXLayoutNav.tsx +++ 
b/packages/nextra-theme/src/layout/MDXLayoutNav.tsx @@ -133,6 +133,10 @@ export const MDXLayoutNav = ({ mobile = false }: { mobile?: boolean }) => { return {pageItem.title} } if ('children' in pageItem && pageItem.children) { + if (pageItem.children.length === 0) { + return null + } + if (pageItem.type === 'children') { return {pageItem.children.map(renderSidebar)} } diff --git a/packages/og-image/package.json b/packages/og-image/package.json index 2f00117bf5cd..345ba0ad437c 100644 --- a/packages/og-image/package.json +++ b/packages/og-image/package.json @@ -16,12 +16,12 @@ "yoga-wasm-web": "0.3.3" }, "devDependencies": { - "@cloudflare/workers-types": "^4.20241112.0", - "@types/react": "^18.3.12", + "@cloudflare/workers-types": "^4.20241205.0", + "@types/react": "^18.3.16", "jest-image-snapshot": "^6.4.0", "tsx": "^4.19.2", - "typescript": "^5.6.3", + "typescript": "^5.7.2", "vitest": "^1.6.0", - "wrangler": "^3.87.0" + "wrangler": "^3.95.0" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 19085d8aec71..c1376974eb47 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -10,7 +10,7 @@ importers: devDependencies: '@edgeandnode/eslint-config': specifier: ^2.0.3 - version: 2.0.3(eslint@8.57.1)(typescript@5.6.3) + version: 2.0.3(eslint@8.57.1)(typescript@5.7.2) eslint: specifier: ^8.57.1 version: 8.57.1 @@ -18,11 +18,11 @@ importers: specifier: ^2.3.4 version: 2.3.4(eslint@8.57.1) prettier: - specifier: ^3.3.3 - version: 3.3.3 + specifier: ^3.4.2 + version: 3.4.2 prettier-plugin-tailwindcss: specifier: ^0.6.9 - version: 0.6.9(prettier@3.3.3) + version: 0.6.9(prettier@3.4.2) remark-frontmatter: specifier: ^5.0.0 version: 5.0.0 @@ -42,20 +42,20 @@ importers: specifier: ^1.13.4 version: 1.13.4 typescript: - specifier: ^5.6.3 - version: 5.6.3 + specifier: ^5.7.2 + version: 5.7.2 packages/nextra-theme: dependencies: '@docsearch/react': specifier: ^3.8.0 - version: 
3.8.0(@algolia/client-search@5.14.2)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.2) + version: 3.8.0(@algolia/client-search@5.17.1)(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3) '@radix-ui/react-collapsible': - specifier: ^1.1.1 - version: 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^1.1.2 + version: 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-visually-hidden': - specifier: ^1.1.0 - version: 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^1.1.1 + version: 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) lodash: specifier: ^4.17.21 version: 4.17.21 @@ -63,36 +63,36 @@ importers: specifier: ^9.13.1 version: 9.13.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react-use: - specifier: ^17.5.1 - version: 17.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^17.6.0 + version: 17.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) devDependencies: '@edgeandnode/gds': - specifier: ~5.34.0 - version: 5.34.0(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@types/react-dom@18.3.1)(@types/react@18.3.12)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + specifier: ^5.39.1 + version: 
5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@edgeandnode/go': - specifier: ~6.60.0 - version: 6.60.0(as4xc7bxwgf6xy75as5gbynhpq) + specifier: ^6.74.0 + version: 6.74.0(pvvz7w46g6d2da5mez3pi63ihq) '@emotion/react': - specifier: ^11.13.3 - version: 11.13.3(@types/react@18.3.12)(react@18.3.1) + specifier: ^11.14.0 + version: 11.14.0(@types/react@18.3.16)(react@18.3.1) '@types/lodash': specifier: ^4.17.13 version: 4.17.13 '@types/react': - specifier: ^18.3.12 - version: 18.3.12 + specifier: ^18.3.16 + version: 18.3.16 '@types/react-dom': - specifier: ^18.3.1 - version: 18.3.1 + specifier: ^18.3.5 + version: 18.3.5(@types/react@18.3.16) next: - specifier: ^14.2.18 - version: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^14.2.20 + version: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-seo: specifier: ^6.6.0 - version: 6.6.0(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 6.6.0(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nextra: specifier: ^2.13.4 - version: 2.13.4(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 
2.13.4(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: specifier: ^18.3.1 version: 18.3.1 @@ -101,10 +101,10 @@ importers: version: 18.3.1(react@18.3.1) theme-ui: specifier: ^0.17.1 - version: 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + version: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) tsup: specifier: ^8.3.5 - version: 8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.6.0) + version: 8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1) packages/og-image: dependencies: @@ -122,11 +122,11 @@ importers: version: 0.3.3 devDependencies: '@cloudflare/workers-types': - specifier: ^4.20241112.0 - version: 4.20241112.0 + specifier: ^4.20241205.0 + version: 4.20241205.0 '@types/react': - specifier: ^18.3.12 - version: 18.3.12 + specifier: ^18.3.16 + version: 18.3.16 jest-image-snapshot: specifier: ^6.4.0 version: 6.4.0 @@ -134,14 +134,14 @@ importers: specifier: ^4.19.2 version: 4.19.2 typescript: - specifier: ^5.6.3 - version: 5.6.3 + specifier: ^5.7.2 + version: 5.7.2 vitest: specifier: ^1.6.0 - version: 1.6.0(@types/node@22.7.8)(jsdom@24.1.3) + version: 1.6.0(@types/node@22.10.2)(jsdom@24.1.3) wrangler: - specifier: ^3.87.0 - version: 3.87.0(@cloudflare/workers-types@4.20241112.0) + specifier: ^3.95.0 + version: 3.95.0(@cloudflare/workers-types@4.20241205.0) packages/remark-lint-restrict-elements: dependencies: @@ -155,20 +155,20 @@ importers: website: dependencies: '@edgeandnode/common': - specifier: ^6.30.0 - version: 6.30.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) + specifier: ^6.38.0 + version: 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) '@edgeandnode/gds': - specifier: ~5.34.0 - version: 
5.34.0(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@types/react-dom@18.3.1)(@types/react@18.3.12)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + specifier: ^5.39.1 + version: 5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@edgeandnode/go': - specifier: ~6.60.0 - version: 6.60.0(as4xc7bxwgf6xy75as5gbynhpq) + specifier: ^6.74.0 + version: 6.74.0(pvvz7w46g6d2da5mez3pi63ihq) '@emotion/react': - specifier: ^11.13.3 - version: 11.13.3(@types/react@18.3.12)(react@18.3.1) + specifier: ^11.14.0 + version: 11.14.0(@types/react@18.3.16)(react@18.3.1) '@graphprotocol/contracts': specifier: 6.2.1 - version: 6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + version: 6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@graphprotocol/nextra-theme': specifier: workspace:* version: link:../packages/nextra-theme @@ -179,17 +179,17 @@ importers: specifier: ^2.56.0 version: 2.56.0 next: - specifier: ^14.2.18 - version: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: 
^14.2.20 + version: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-seo: specifier: ^6.6.0 - version: 6.6.0(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 6.6.0(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-sitemap: specifier: ^4.2.3 - version: 4.2.3(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) + version: 4.2.3(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)) nextra: specifier: ^2.13.4 - version: 2.13.4(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 2.13.4(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: specifier: ^18.3.1 version: 18.3.1 @@ -201,14 +201,14 @@ importers: version: 2.1.0 theme-ui: specifier: ^0.17.1 - version: 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + version: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) unist-util-visit: specifier: ^5.0.0 version: 5.0.0 devDependencies: '@graphprotocol/client-cli': specifier: 3.0.3 - version: 
3.0.3(@envelop/core@5.0.2)(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/merge@9.0.8(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-ws@5.16.0(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 3.0.3(@envelop/core@5.0.2)(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/merge@9.0.13(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(@types/node@22.10.2)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0) '@types/mdast': specifier: ^4.0.4 version: 4.0.4 @@ -216,11 +216,11 @@ importers: specifier: ^2.50.2 version: 2.50.2 '@types/react': - specifier: ^18.3.12 - version: 18.3.12 + specifier: ^18.3.16 + version: 18.3.16 '@types/react-dom': - specifier: ^18.3.1 - version: 18.3.1 + specifier: ^18.3.5 + version: 18.3.5(@types/react@18.3.16) autoprefixer: specifier: ^10.4.20 version: 10.4.20(postcss@8.4.49) @@ -234,8 +234,8 @@ importers: specifier: ^8.4.49 version: 8.4.49 tailwindcss: - specifier: ^3.4.15 - version: 3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)) + specifier: ^3.4.16 + version: 
3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) tsx: specifier: ^4.19.2 version: 4.19.2 @@ -265,56 +265,56 @@ packages: '@algolia/client-search': '>= 4.9.1 < 6' algoliasearch: '>= 4.9.1 < 6' - '@algolia/client-abtesting@5.14.2': - resolution: {integrity: sha512-7fq1tWIy1aNJEaNHxWy3EwDkuo4k22+NBnxq9QlYVSLLXtr6HqmAm6bQgNNzGT3vm21iKqWO9efk+HIhEM1SzQ==} + '@algolia/client-abtesting@5.17.1': + resolution: {integrity: sha512-Os/xkQbDp5A5RdGYq1yS3fF69GoBJH5FIfrkVh+fXxCSe714i1Xdl9XoXhS4xG76DGKm6EFMlUqP024qjps8cg==} engines: {node: '>= 14.0.0'} - '@algolia/client-analytics@5.14.2': - resolution: {integrity: sha512-5Nm5cOOyAGcY+hKNJVmR2jgoGn1nvoANS8W5EfB8yAaUqUxL3lFNUHSkFafAMTCOcVKNDkZQYjUDbOOfdYJLqw==} + '@algolia/client-analytics@5.17.1': + resolution: {integrity: sha512-WKpGC+cUhmdm3wndIlTh8RJXoVabUH+4HrvZHC4hXtvCYojEXYeep8RZstatwSZ7Ocg6Y2u67bLw90NEINuYEw==} engines: {node: '>= 14.0.0'} - '@algolia/client-common@5.14.2': - resolution: {integrity: sha512-BW1Qzhh9tMKEsWSQQsiOEcHAd6g7zxq9RpPVmyxbDO/O4eA4vyN+Qz5Jzo686kuYdIQKqIPCEtob/JM89tk57g==} + '@algolia/client-common@5.17.1': + resolution: {integrity: sha512-5rb5+yPIie6912riAypTSyzbE23a7UM1UpESvD8GEPI4CcWQvA9DBlkRNx9qbq/nJ5pvv8VjZjUxJj7rFkzEAA==} engines: {node: '>= 14.0.0'} - '@algolia/client-insights@5.14.2': - resolution: {integrity: sha512-17zg6pqifKORvvrMIqW6HhwUry9RKRXLgADrgFjZ6PZvGB4oVs12dwRG2/HMrIlpxd9cjeQfdlEgHj6lbAf6QA==} + '@algolia/client-insights@5.17.1': + resolution: {integrity: sha512-nb/tfwBMn209TzFv1DDTprBKt/wl5btHVKoAww9fdEVdoKK02R2KAqxe5tuXLdEzAsS+LevRyOM/YjXuLmPtjQ==} engines: {node: '>= 14.0.0'} - '@algolia/client-personalization@5.14.2': - resolution: {integrity: sha512-5IYt8vbmTA52xyuaZKFwiRoDPeh7hiOC9aBZqqp9fVs6BU01djI/T8pGJXawvwczltCPYzNsdbllV3rqiDbxmQ==} + '@algolia/client-personalization@5.17.1': + resolution: {integrity: sha512-JuNlZe1SdW9KbV0gcgdsiVkFfXt0mmPassdS3cBSGvZGbPB9JsHthD719k5Y6YOY4dGvw1JmC1i9CwCQHAS8hg==} engines: {node: '>= 14.0.0'} - 
'@algolia/client-query-suggestions@5.14.2': - resolution: {integrity: sha512-gvCX/cczU76Bu1sGcxxTdoIwxe+FnuC1IlW9SF/gzxd3ZzsgzBpzD2puIJqt9fHQsjLxVGkJqKev2FtExnJYZg==} + '@algolia/client-query-suggestions@5.17.1': + resolution: {integrity: sha512-RBIFIv1QE3IlAikJKWTOpd6pwE4d2dY6t02iXH7r/SLXWn0HzJtsAPPeFg/OKkFvWAXt0H7In2/Mp7a1/Dy2pw==} engines: {node: '>= 14.0.0'} - '@algolia/client-search@5.14.2': - resolution: {integrity: sha512-0imdBZDjqxrshw0+eyJUgnkRAbS2W93UQ3BVj8VjN4xQylIMf0fWs72W7MZFdHlH78JJYydevgzqvGMcV0Z1CA==} + '@algolia/client-search@5.17.1': + resolution: {integrity: sha512-bd5JBUOP71kPsxwDcvOxqtqXXVo/706NFifZ/O5Rx5GB8ZNVAhg4l7aGoT6jBvEfgmrp2fqPbkdIZ6JnuOpGcw==} engines: {node: '>= 14.0.0'} - '@algolia/ingestion@1.14.2': - resolution: {integrity: sha512-/p4rBNkW0fgCpCwrwre+jHfzlFQsLemgaAQqyui8NPxw95Wgf3p+DKxYzcmh8dygT7ub7FwztTW+uURLX1uqIQ==} + '@algolia/ingestion@1.17.1': + resolution: {integrity: sha512-T18tvePi1rjRYcIKhd82oRukrPWHxG/Iy1qFGaxCplgRm9Im5z96qnYOq75MSKGOUHkFxaBKJOLmtn8xDR+Mcw==} engines: {node: '>= 14.0.0'} - '@algolia/monitoring@1.14.2': - resolution: {integrity: sha512-81R57Y/mS0uNhWpu6cNEfkbkADLW4bP0BNjuPpxAypobv7WzYycUnbMvv1YkN6OsociB4+3M7HfsVzj4Nc09vA==} + '@algolia/monitoring@1.17.1': + resolution: {integrity: sha512-gDtow+AUywTehRP8S1tWKx2IvhcJOxldAoqBxzN3asuQobF7er5n72auBeL++HY4ImEuzMi7PDOA/Iuwxs2IcA==} engines: {node: '>= 14.0.0'} - '@algolia/recommend@5.14.2': - resolution: {integrity: sha512-OwELnAZxCUyfjYjqsrFmC7Vfa12kqwbDdLUV0oi4j+4pxDsfPgkiZ6iCH2uPw6X8VK88Hl3InPt+RPaZvcrCWg==} + '@algolia/recommend@5.17.1': + resolution: {integrity: sha512-2992tTHkRe18qmf5SP57N78kN1D3e5t4PO1rt10sJncWtXBZWiNOK6K/UcvWsFbNSGAogFcIcvIMAl5mNp6RWA==} engines: {node: '>= 14.0.0'} - '@algolia/requester-browser-xhr@5.14.2': - resolution: {integrity: sha512-irUvkK+TGBhyivtNCIIbVgNUgbUoHOSk8m/kFX4ddto/PUPmLFRRNNnMHtJ1+OzrJ/uD3Am4FUK2Yt+xgQr05w==} + '@algolia/requester-browser-xhr@5.17.1': + resolution: {integrity: 
sha512-XpKgBfyczVesKgr7DOShNyPPu5kqlboimRRPjdqAw5grSyHhCmb8yoTIKy0TCqBABZeXRPMYT13SMruUVRXvHA==} engines: {node: '>= 14.0.0'} - '@algolia/requester-fetch@5.14.2': - resolution: {integrity: sha512-UNBg5mM4MIYdxPuVjyDL22BC6P87g7WuM91Z1Ky0J19aEGvCSF+oR+9autthROFXdRnAa1rACOjuqn95iBbKpw==} + '@algolia/requester-fetch@5.17.1': + resolution: {integrity: sha512-EhUomH+DZP5vb6DnEjT0GvXaXBSwzZnuU6hPGNU1EYKRXDouRjII/bIWpVjt7ycMgL2D2oQruqDh6rAWUhQwRw==} engines: {node: '>= 14.0.0'} - '@algolia/requester-node-http@5.14.2': - resolution: {integrity: sha512-CTFA03YiLcnpP+JoLRqjHt5pqDHuKWJpLsIBY/60Gmw8pjALZ3TwvbAquRX4Vy+yrin178NxMuU+ilZ54f2IrQ==} + '@algolia/requester-node-http@5.17.1': + resolution: {integrity: sha512-PSnENJtl4/wBWXlGyOODbLYm6lSiFqrtww7UpQRCJdsHXlJKF8XAP6AME8NxvbE0Qo/RJUxK0mvyEh9sQcx6bg==} engines: {node: '>= 14.0.0'} '@alloc/quick-lru@5.2.0': @@ -325,26 +325,8 @@ packages: resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} engines: {node: '>=6.0.0'} - '@apollo/client@3.11.8': - resolution: {integrity: sha512-CgG1wbtMjsV2pRGe/eYITmV5B8lXUCYljB2gB/6jWTFQcrvirUVvKg7qtFdjYkQSFbIffU1IDyxgeaN81eTjbA==} - peerDependencies: - graphql: ^15.0.0 || ^16.0.0 - graphql-ws: ^5.5.5 - react: ^16.8.0 || ^17.0.0 || ^18.0.0 || >=19.0.0-rc <19.0.0 - react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || >=19.0.0-rc <19.0.0 - subscriptions-transport-ws: ^0.9.0 || ^0.11.0 - peerDependenciesMeta: - graphql-ws: - optional: true - react: - optional: true - react-dom: - optional: true - subscriptions-transport-ws: - optional: true - - '@arbitrum/sdk@3.7.0': - resolution: {integrity: sha512-7Omaqd8xfhCatxyyFZC3k7S9HE8pTVuk9tg+snqk8ojDVqO8kiD3YrYS9STJqbKxLBQ1TRktbRaUOAiH3+Y0zg==} + '@arbitrum/sdk@3.7.1': + resolution: {integrity: sha512-OP0klpCFUU8KC22d1UqSaKYMT8EQa5UnHtwOR196GWYVv0FemDSXY4V6aqhDGYxJgMb3uBzm7VpKT/1Ww2NvWQ==} engines: {node: '>=v11', npm: please-use-yarn, yarn: '>= 1.0.0'} '@ardatan/relay-compiler@12.0.0': @@ -357,20 
+339,24 @@ packages: resolution: {integrity: sha512-xhlTqH0m31mnsG0tIP4ETgfSB6gXDaYYsUWTrlUV93fFQPI9dd8hE0Ot6MHLCtqgB32hwJAC3YZMWlXZw7AleA==} engines: {node: '>=14'} - '@babel/code-frame@7.25.9': - resolution: {integrity: sha512-z88xeGxnzehn2sqZ8UdGQEvYErF1odv2CftxInpSYJt6uHuPe9YjahKZITGs3l5LeI9d2ROG+obuDAoSlqbNfQ==} + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.3': + resolution: {integrity: sha512-nHIxvKPniQXpmQLb0vhY3VaFb3S0YrTAwpOWJZh1wn3oJPjJk9Asva204PsBdmAE8vpzfHudT8DB0scYvy9q0g==} engines: {node: '>=6.9.0'} - '@babel/compat-data@7.25.9': - resolution: {integrity: sha512-yD+hEuJ/+wAJ4Ox2/rpNv5HIuPG82x3ZlQvYVn8iYCprdxzE7P1udpGF1jyjQVBU4dgznN+k2h103vxZ7NdPyw==} + '@babel/core@7.26.0': + resolution: {integrity: sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==} engines: {node: '>=6.9.0'} - '@babel/core@7.25.9': - resolution: {integrity: sha512-WYvQviPw+Qyib0v92AwNIrdLISTp7RfDkM7bPqBvpbnhY4wq8HvHBZREVdYDXk98C8BkOIVnHAY3yvj7AVISxQ==} + '@babel/generator@7.26.2': + resolution: {integrity: sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==} engines: {node: '>=6.9.0'} - '@babel/generator@7.25.9': - resolution: {integrity: sha512-omlUGkr5EaoIJrhLf9CJ0TvjBRpd9+AXRG//0GEQ9THSo8wPiTlbpy1/Ow8ZTrbXpjd9FHXfbFQx32I04ht0FA==} + '@babel/generator@7.26.3': + resolution: {integrity: sha512-6FF/urZvD0sTeO7k6/B15pMLC4CHUv1426lzr3N01aHJTl046uCAh9LXW/fzeXXjPNCJ6iABW5XaWOsIZB93aQ==} engines: {node: '>=6.9.0'} '@babel/helper-annotate-as-pure@7.25.9': @@ -395,8 +381,8 @@ packages: resolution: {integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} engines: {node: '>=6.9.0'} - '@babel/helper-module-transforms@7.25.9': - resolution: {integrity: 
sha512-TvLZY/F3+GvdRYFZFyxMvnsKi+4oJdgZzU3BoGN9Uc2d9C6zfNwJcKKhjqLAhK8i46mv93jsO74fDh3ih6rpHA==} + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 @@ -415,10 +401,6 @@ packages: peerDependencies: '@babel/core': ^7.0.0 - '@babel/helper-simple-access@7.25.9': - resolution: {integrity: sha512-c6WHXuiaRsJTyHYLJV75t9IqsmTbItYfdj99PnzYGQZkYKvan5/2jKJ7gu31J3/BJ/A18grImSPModuyG/Eo0Q==} - engines: {node: '>=6.9.0'} - '@babel/helper-skip-transparent-expression-wrappers@7.25.9': resolution: {integrity: sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA==} engines: {node: '>=6.9.0'} @@ -435,16 +417,17 @@ packages: resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} engines: {node: '>=6.9.0'} - '@babel/helpers@7.25.9': - resolution: {integrity: sha512-oKWp3+usOJSzDZOucZUAMayhPz/xVjzymyDzUN8dk0Wd3RWMlGLXi07UCQ/CgQVb8LvXx3XBajJH4XGgkt7H7g==} + '@babel/helpers@7.26.0': + resolution: {integrity: sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==} engines: {node: '>=6.9.0'} - '@babel/highlight@7.25.9': - resolution: {integrity: sha512-llL88JShoCsth8fF8R4SJnIn+WLvR6ccFxu1H3FlMhDontdcmZWf2HgIZ7AIqV3Xcck1idlohrN4EUBQz6klbw==} - engines: {node: '>=6.9.0'} + '@babel/parser@7.26.0': + resolution: {integrity: sha512-aP8x5pIw3xvYr/sXT+SEUwyhrXT8rUJRZltK/qN3Db80dcKpTett8cJxHyjk+xYSVXvNnl2SfcJVjbwxpOSscA==} + engines: {node: '>=6.0.0'} + hasBin: true - '@babel/parser@7.25.9': - resolution: {integrity: sha512-aI3jjAAO1fh7vY/pBGsn1i9LDbRP43+asrRlkPuTXW5yHXtd1NgTEMudbBoDDxrf1daEEfPJqR+JBMakzrR4Dg==} + '@babel/parser@7.26.3': + resolution: {integrity: sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA==} engines: {node: 
'>=6.0.0'} hasBin: true @@ -467,14 +450,14 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 - '@babel/plugin-syntax-flow@7.25.9': - resolution: {integrity: sha512-F3FVgxwamIRS3+kfjNaPARX0DSAiH1exrQUVajXiR34hkdA9eyK+8rJbnu55DQjKL/ayuXqjNr2HDXwBEMEtFQ==} + '@babel/plugin-syntax-flow@7.26.0': + resolution: {integrity: sha512-B+O2DnPc0iG+YXFqOxv2WNuNU97ToWjOomUQ78DouOENWUaM5sVrmet9mcomUGQFwpJd//gvUagXBSdzO1fRKg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 - '@babel/plugin-syntax-import-assertions@7.25.9': - resolution: {integrity: sha512-4GHX5uzr5QMOOuzV0an9MFju4hKlm0OyePl/lHhcsTVae5t/IKVHnb8W67Vr6FuLlk5lPqLB7n7O+K5R46emYg==} + '@babel/plugin-syntax-import-assertions@7.26.0': + resolution: {integrity: sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 @@ -556,8 +539,8 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 - '@babel/plugin-transform-modules-commonjs@7.25.9': - resolution: {integrity: sha512-dwh2Ol1jWwL2MgkCzUSOvfmKElqQcuswAZypBSUsScMXvgdT8Ekq5YA6TtqpTVWH+4903NmboMuH1o9i8Rxlyg==} + '@babel/plugin-transform-modules-commonjs@7.26.3': + resolution: {integrity: sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 @@ -610,20 +593,24 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 - '@babel/runtime@7.25.9': - resolution: {integrity: sha512-4zpTHZ9Cm6L9L+uIqghQX8ZXg8HKFcjYO3qHoO8zTmRm6HQUJ8SSJ+KRvbMBZn0EGVlT4DRYeQ/6hjlyXBh+Kg==} + '@babel/runtime@7.26.0': + resolution: {integrity: sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==} engines: {node: '>=6.9.0'} '@babel/template@7.25.9': resolution: {integrity: sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==} engines: {node: '>=6.9.0'} - 
'@babel/traverse@7.25.9': - resolution: {integrity: sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==} + '@babel/traverse@7.26.4': + resolution: {integrity: sha512-fH+b7Y4p3yqvApJALCPJcwb0/XaOSgtK4pzV6WVjPR5GLFQBRI7pfoX2V2iM48NXvX07NUxxm1Vw98YjqTcU5w==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.26.0': + resolution: {integrity: sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==} engines: {node: '>=6.9.0'} - '@babel/types@7.25.9': - resolution: {integrity: sha512-OwS2CM5KocvQ/k7dFJa8i5bNGJP0hXWfVCfDkqRFP1IreH1JDC7wG6eCYCi0+McbfT8OR/kNqsI0UU0xP9H6PQ==} + '@babel/types@7.26.3': + resolution: {integrity: sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA==} engines: {node: '>=6.9.0'} '@braintree/sanitize-url@6.0.4': @@ -648,42 +635,42 @@ packages: resolution: {integrity: sha512-YLPHc8yASwjNkmcDMQMY35yiWjoKAKnhUbPRszBRS0YgH+IXtsMp61j+yTcnCE3oO2DgP0U3iejLC8FTtKDC8Q==} engines: {node: '>=16.13'} - '@cloudflare/workerd-darwin-64@1.20241106.1': - resolution: {integrity: sha512-zxvaToi1m0qzAScrxFt7UvFVqU8DxrCO2CinM1yQkv5no7pA1HolpIrwZ0xOhR3ny64Is2s/J6BrRjpO5dM9Zw==} + '@cloudflare/workerd-darwin-64@1.20241205.0': + resolution: {integrity: sha512-TArEZkSZkHJyEwnlWWkSpCI99cF6lJ14OVeEoI9Um/+cD9CKZLM9vCmsLeKglKheJ0KcdCnkA+DbeD15t3VaWg==} engines: {node: '>=16'} cpu: [x64] os: [darwin] - '@cloudflare/workerd-darwin-arm64@1.20241106.1': - resolution: {integrity: sha512-j3dg/42D/bPgfNP3cRUBxF+4waCKO/5YKwXNj+lnVOwHxDu+ne5pFw9TIkKYcWTcwn0ZUkbNZNM5rhJqRn4xbg==} + '@cloudflare/workerd-darwin-arm64@1.20241205.0': + resolution: {integrity: sha512-u5eqKa9QRdA8MugfgCoD+ADDjY6EpKbv3hSYJETmmUh17l7WXjWBzv4pUvOKIX67C0UzMUy4jZYwC53MymhX3w==} engines: {node: '>=16'} cpu: [arm64] os: [darwin] - '@cloudflare/workerd-linux-64@1.20241106.1': - resolution: {integrity: sha512-Ih+Ye8E1DMBXcKrJktGfGztFqHKaX1CeByqshmTbODnWKHt6O65ax3oTecUwyC0+abuyraOpAtdhHNpFMhUkmw==} 
+ '@cloudflare/workerd-linux-64@1.20241205.0': + resolution: {integrity: sha512-OYA7S5zpumMamWEW+IhhBU6YojIEocyE5X/YFPiTOCrDE3dsfr9t6oqNE7hxGm1VAAu+Irtl+a/5LwmBOU681w==} engines: {node: '>=16'} cpu: [x64] os: [linux] - '@cloudflare/workerd-linux-arm64@1.20241106.1': - resolution: {integrity: sha512-mdQFPk4+14Yywn7n1xIzI+6olWM8Ybz10R7H3h+rk0XulMumCWUCy1CzIDauOx6GyIcSgKIibYMssVHZR30ObA==} + '@cloudflare/workerd-linux-arm64@1.20241205.0': + resolution: {integrity: sha512-qAzecONjFJGIAVJZKExQ5dlbic0f3d4A+GdKa+H6SoUJtPaWiE3K6WuePo4JOT7W3/Zfh25McmX+MmpMUUcM5Q==} engines: {node: '>=16'} cpu: [arm64] os: [linux] - '@cloudflare/workerd-windows-64@1.20241106.1': - resolution: {integrity: sha512-4rtcss31E/Rb/PeFocZfr+B9i1MdrkhsTBWizh8siNR4KMmkslU2xs2wPaH1z8+ErxkOsHrKRa5EPLh5rIiFeg==} + '@cloudflare/workerd-windows-64@1.20241205.0': + resolution: {integrity: sha512-BEab+HiUgCdl6GXAT7EI2yaRtDPiRJlB94XLvRvXi1ZcmQqsrq6awGo6apctFo4WUL29V7c09LxmN4HQ3X2Tvg==} engines: {node: '>=16'} cpu: [x64] os: [win32] - '@cloudflare/workers-shared@0.7.1': - resolution: {integrity: sha512-46cP5FCrl3TrvHeoHLb5SRuiDMKH5kc9Yvo36SAfzt8dqJI/qJRoY1GP3ioHn/gP7v2QIoUOTAzIl7Ml7MnfrA==} + '@cloudflare/workers-shared@0.11.0': + resolution: {integrity: sha512-A+lQ8xp7992qSeMmuQ0ssL6CPmm+ZmAv6Ddikan0n1jjpMAic+97l7xtVIsswSn9iLMFPYQ9uNN/8Fl0AgARIQ==} engines: {node: '>=16.7.0'} - '@cloudflare/workers-types@4.20241112.0': - resolution: {integrity: sha512-Q4p9bAWZrX14bSCKY9to19xl0KMU7nsO5sJ2cTVspHoypsjPUMeQCsjHjmsO2C4Myo8/LPeDvmqFmkyNAPPYZw==} + '@cloudflare/workers-types@4.20241205.0': + resolution: {integrity: sha512-pj1VKRHT/ScQbHOIMFODZaNAlJHQHdBSZXNIdr9ebJzwBff9Qz8VdqhbhggV7f+aUEh8WSbrsPIo4a+WtgjUvw==} '@corex/deepmerge@4.0.43': resolution: {integrity: sha512-N8uEMrMPL0cu/bdboEWpQYb/0i2K5Qn8eCsxzOmxSggJbbQte7ljMRoXm917AbntqTGOzdTu+vP3KOOzoC70HQ==} @@ -712,8 +699,8 @@ packages: search-insights: optional: true - '@edgeandnode/common@6.30.0': - resolution: {integrity: 
sha512-B9dmEXc1R3vMb/Y9LKdDffNptj49gNHubJocCMPoUD1uT5EEta3ssf+WBFXhYqAoEkA37uDtXSduzwovppXVqA==} + '@edgeandnode/common@6.38.0': + resolution: {integrity: sha512-MghQ6o72e3bNtTuqUNiL2tZf5JPoY8gT2zXMRJeiLFaekzxF9CKHEghCG4JpqaIxsKqAcNZ9s/GLFDsyEg7MtA==} '@edgeandnode/eslint-config@2.0.3': resolution: {integrity: sha512-I89EK3cJNmJqJH1zLwyoKFFP6lrOWnPnZDgo8/Ew7BpOOA1Qhqcu0ek6erAo+mDt/4/4hlEu0Agrewr80NcImA==} @@ -724,8 +711,8 @@ packages: typescript: optional: true - '@edgeandnode/gds@5.34.0': - resolution: {integrity: sha512-YXAheTlMpbReEwN+r+QeUtvAc+yNUuyXtSdRBzra3fsb30i+M+Gr6VZ9Cwq12lbj5GRSLfL6JJCTcR4gnVni9w==} + '@edgeandnode/gds@5.39.1': + resolution: {integrity: sha512-cJHuca1UcLT6Tu8kMbySusXzPWZJUI/Kq/F11jWeYpl4P63CJoVHJBea3+WavN4K5FwaIo/Op7OmIo4b9NBPTA==} peerDependencies: '@emotion/react': ^11 dayjs: ^1.11 @@ -737,11 +724,11 @@ packages: next: optional: true - '@edgeandnode/go@6.60.0': - resolution: {integrity: sha512-ehyT7dck1uM3U6HA/NP3k2HIWC2ihW1zZHje9Q+7Xfm+5UxNWxs+hJ+UBg3+YNkcuq68Yy5AVhMWO1PViSoC0g==} + '@edgeandnode/go@6.74.0': + resolution: {integrity: sha512-VWzATmJ/4fcuTs6kWhkHOcnKN1NYNpMH3lw7bWx/KSkwzNnDEqyDmDD47gxG6mnaSSyPtPdTg7bf6jxyfrjg8A==} peerDependencies: - '@edgeandnode/common': ^6.28.3 - '@edgeandnode/gds': ^5.34.0 + '@edgeandnode/common': ^6.38.0 + '@edgeandnode/gds': ^5.39.1 '@emotion/react': ^11 next: '>=13' react: ^18 @@ -751,11 +738,11 @@ packages: next: optional: true - '@emotion/babel-plugin@11.12.0': - resolution: {integrity: sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw==} + '@emotion/babel-plugin@11.13.5': + resolution: {integrity: sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==} - '@emotion/cache@11.13.1': - resolution: {integrity: sha512-iqouYkuEblRcXmylXIwwOodiEK5Ifl7JcX7o6V4jI3iW4mLXX3dmt5xwBtIkJiQEXFAI+pC8X0i67yiPkH9Ucw==} + '@emotion/cache@11.14.0': + resolution: {integrity: 
sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==} '@emotion/hash@0.9.2': resolution: {integrity: sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==} @@ -772,8 +759,8 @@ packages: '@emotion/memoize@0.9.0': resolution: {integrity: sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==} - '@emotion/react@11.13.3': - resolution: {integrity: sha512-lIsdU6JNrmYfJ5EbUCf4xW1ovy5wKQ2CkPRM4xogziOxH1nXxBSjpC9YqbFAP7circxMfYp+6x676BqWcEiixg==} + '@emotion/react@11.14.0': + resolution: {integrity: sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==} peerDependencies: '@types/react': '*' react: '>=16.8.0' @@ -781,8 +768,8 @@ packages: '@types/react': optional: true - '@emotion/serialize@1.3.2': - resolution: {integrity: sha512-grVnMvVPK9yUVE6rkKfAJlYZgo0cu3l9iMC77V7DW6E1DUIrU68pSEXRmFZFOFB1QFo57TncmOcvcbMDWsL4yA==} + '@emotion/serialize@1.3.3': + resolution: {integrity: sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==} '@emotion/sheet@1.4.0': resolution: {integrity: sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==} @@ -790,13 +777,13 @@ packages: '@emotion/unitless@0.10.0': resolution: {integrity: sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==} - '@emotion/use-insertion-effect-with-fallbacks@1.1.0': - resolution: {integrity: sha512-+wBOcIV5snwGgI2ya3u99D7/FJquOIniQT1IKyDsBmEgwvpxMNeS65Oib7OnE2d2aY+3BU4OiH+0Wchf8yk3Hw==} + '@emotion/use-insertion-effect-with-fallbacks@1.2.0': + resolution: {integrity: sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==} peerDependencies: react: '>=16.8.0' - '@emotion/utils@1.4.1': - resolution: {integrity: 
sha512-BymCXzCG3r72VKJxaYVwOXATqXIZ85cuvg0YOUDxMGNrKc1DJRZk8MgV5wyXRyEayIMd4FuXJIUgTBXvDNW5cA==} + '@emotion/utils@1.4.2': + resolution: {integrity: sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==} '@emotion/weak-memoize@0.4.0': resolution: {integrity: sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==} @@ -812,11 +799,11 @@ packages: '@envelop/core': ^5.0.2 graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 - '@envelop/graphql-jit@8.0.3': - resolution: {integrity: sha512-IZnKc7dVOQV9jEi5s5RkG8fVKqc6Ss/mBN9PRt2iYFa9o6XkL/haPLJRfWFsS/CSJfFOQuzLyxYuALA8DaoOYw==} + '@envelop/graphql-jit@8.0.4': + resolution: {integrity: sha512-3vchsMVsPyCmBMDkRPvjAqhJjyunQW7wZ5zXx6q+gfiEVcaetDJZwK+0ODo+IOxO5eNHqQ9u4Z2QiRTCCuvXgA==} engines: {node: '>=18.0.0'} peerDependencies: - '@envelop/core': ^5.0.0 + '@envelop/core': ^5.0.2 graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 '@envelop/types@5.0.0': @@ -1391,14 +1378,14 @@ packages: cpu: [x64] os: [win32] - '@eslint-community/eslint-utils@4.4.0': - resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + '@eslint-community/eslint-utils@4.4.1': + resolution: {integrity: sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 - '@eslint-community/regexpp@4.11.1': - resolution: {integrity: sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q==} + '@eslint-community/regexpp@4.12.1': + resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} '@eslint/eslintrc@2.1.4': @@ -1518,16 +1505,16 @@ packages: '@fastify/merge-json-schemas@0.1.1': resolution: {integrity: 
sha512-fERDVz7topgNjtXsJTTW1JKLy0rhuLRcquYqNR9rF7OcVpCa2OVW49ZPDIhaRRCaUuvVxI+N416xUoF76HNSXA==} - '@figma/code-connect@1.2.0': - resolution: {integrity: sha512-iwLtg+DyRZaPYY24+scK8nhBBHUGrrOnz2G/4gIOc2AqwlyO2wKFPohvCdjL9wmHseGGxVXXzICxdkJQRKv7kw==} - engines: {node: '>=16'} + '@figma/code-connect@1.2.4': + resolution: {integrity: sha512-IzSmbHhUgA44a4OyYSkB+mU3tCVPheWAFIFN7O81He4l3EMy1Ac6bxu/rFjoQP0WzXGr7X77AflB8Za7pKAT3Q==} + engines: {node: '>=18'} hasBin: true '@floating-ui/core@1.6.8': resolution: {integrity: sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA==} - '@floating-ui/dom@1.6.11': - resolution: {integrity: sha512-qkMCxSR24v2vGkhYDo/UzxfJN3D4syqSjyuTFz6C7XcpU1pASPRieNI0Kj5VP3/503mOfYiGY891ugBX1GlABQ==} + '@floating-ui/dom@1.6.12': + resolution: {integrity: sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==} '@floating-ui/react-dom@2.1.2': resolution: {integrity: sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==} @@ -1535,8 +1522,8 @@ packages: react: '>=16.8.0' react-dom: '>=16.8.0' - '@floating-ui/react@0.26.25': - resolution: {integrity: sha512-hZOmgN0NTOzOuZxI1oIrDu3Gcl8WViIkvPMpB4xdd4QD6xAMtwgwr3VPoiyH/bLtRcS1cDnhxLSD1NsMJmwh/A==} + '@floating-ui/react@0.26.28': + resolution: {integrity: sha512-yORQuuAtVpiRjpMhdc0wJj06b9JFjrYF4qp96j++v2NBpbi6SEGF7donUJ3TMieerQ6qVkAv1tgr7L4r5roTqw==} peerDependencies: react: '>=16.8.0' react-dom: '>=16.8.0' @@ -1544,20 +1531,20 @@ packages: '@floating-ui/utils@0.2.8': resolution: {integrity: sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==} - '@formatjs/ecma402-abstract@2.2.0': - resolution: {integrity: sha512-IpM+ev1E4QLtstniOE29W1rqH9eTdx5hQdNL8pzrflMj/gogfaoONZqL83LUeQScHAvyMbpqP5C9MzNf+fFwhQ==} + '@formatjs/ecma402-abstract@2.3.1': + resolution: {integrity: 
sha512-Ip9uV+/MpLXWRk03U/GzeJMuPeOXpJBSB5V1tjA6kJhvqssye5J5LoYLc7Z5IAHb7nR62sRoguzrFiVCP/hnzw==} - '@formatjs/fast-memoize@2.2.1': - resolution: {integrity: sha512-XS2RcOSyWxmUB7BUjj3mlPH0exsUzlf6QfhhijgI941WaJhVxXQ6mEWkdUFIdnKi3TuTYxRdelsgv3mjieIGIA==} + '@formatjs/fast-memoize@2.2.5': + resolution: {integrity: sha512-6PoewUMrrcqxSoBXAOJDiW1m+AmkrAj0RiXnOMD59GRaswjXhm3MDhgepXPBgonc09oSirAJTsAggzAGQf6A6g==} - '@formatjs/icu-messageformat-parser@2.8.0': - resolution: {integrity: sha512-r2un3fmF9oJv3mOkH+wwQZ037VpqmdfahbcCZ9Lh+p6Sx+sNsonI7Zcr6jNMm1s+Si7ejQORS4Ezlh05mMPAXA==} + '@formatjs/icu-messageformat-parser@2.9.7': + resolution: {integrity: sha512-cuEHyRM5VqLQobANOjtjlgU7+qmk9Q3fDQuBiRRJ3+Wp3ZoZhpUPtUfuimZXsir6SaI2TaAJ+SLo9vLnV5QcbA==} - '@formatjs/icu-skeleton-parser@1.8.4': - resolution: {integrity: sha512-LMQ1+Wk1QSzU4zpd5aSu7+w5oeYhupRwZnMQckLPRYhSjf2/8JWQ882BauY9NyHxs5igpuQIXZDgfkaH3PoATg==} + '@formatjs/icu-skeleton-parser@1.8.11': + resolution: {integrity: sha512-8LlHHE/yL/zVJZHAX3pbKaCjZKmBIO6aJY1mkVh4RMSEu/2WRZ4Ysvv3kKXJ9M8RJLBHdnk1/dUQFdod1Dt7Dw==} - '@formatjs/intl-localematcher@0.5.5': - resolution: {integrity: sha512-t5tOGMgZ/i5+ALl2/offNqAQq/lfUnKLEw0mXQI4N4bqpedhrSE+fyKLpwnd22sK0dif6AV+ufQcTsKShB9J1g==} + '@formatjs/intl-localematcher@0.5.9': + resolution: {integrity: sha512-8zkGu/sv5euxbjfZ/xmklqLyDGQSxsLqg8XOq88JW3cmJtzhCP8EtSJXlaKZnVO4beEaoiT9wj4eIoCQ9smwxA==} '@graphprotocol/client-add-source-name@2.0.7': resolution: {integrity: sha512-tAEBHwvpjq0bCRDrup8AW6mC8+rMulyrrZXp5xF0Eu924nl4XJOYklpzNp0B8oHs4HI+equHhXlXV6nqQ8jPcg==} @@ -1609,8 +1596,8 @@ packages: '@graphql-tools/merge': ^8.3.14 || ^9.0.0 graphql: ^15.2.0 || ^16.0.0 - '@graphprotocol/common-ts@2.0.10': - resolution: {integrity: sha512-7aAoskggNyMTr54es+7C6sXbSxEz0qxeqVeMqslYnLSAhUtO/TF0UOLdRa06A6RhaVZl5xKh4FUPC1Lr2NiBwQ==} + '@graphprotocol/common-ts@2.0.11': + resolution: {integrity: sha512-WtQGYMGVwaXDIli+OCAZUSqh8+ql9THzjztqvLGeSbAIPKxysvej9vua0voMguqEkI/RyEEMBajelodMzzZlEw==} 
'@graphprotocol/contracts@5.3.3': resolution: {integrity: sha512-fmFSKr+VDinWWotj2q/Ztn92PppcRrYXeO/62gLgkLos/DcYa7bGWKbcOWyMUw0vsUvXxk6QAtr5o/LG3yQ1WQ==} @@ -1640,8 +1627,9 @@ packages: peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 - '@graphql-codegen/plugin-helpers@5.0.4': - resolution: {integrity: sha512-MOIuHFNWUnFnqVmiXtrI+4UziMTYrcquljaI5f/T/Bc7oO7sXcfkAvgkNWEEi9xWreYwvuer3VHCuPI/lAFWbw==} + '@graphql-codegen/plugin-helpers@5.1.0': + resolution: {integrity: sha512-Y7cwEAkprbTKzVIe436TIw4w03jorsMruvCvu0HJkavaKMQbWY+lQ1RIuROgszDbxAyM35twB5/sUvYG5oW+yg==} + engines: {node: '>=16'} peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 @@ -1650,8 +1638,8 @@ packages: peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 - '@graphql-codegen/typed-document-node@5.0.10': - resolution: {integrity: sha512-YPDUNs6x0muoVWlbY2yEs0lGxFHMTszlGDh6klT/5rqiTDTZg3zz8Wd1ZTihkcH8+V6T0AT9qDWwcx9fcS2tvQ==} + '@graphql-codegen/typed-document-node@5.0.12': + resolution: {integrity: sha512-Wsbc1AqC+MFp3maWPzrmmyHLuWCPB63qBBFLTKtO6KSsnn0KnLocBp475wkfBZnFISFvzwpJ0e6LV71gKfTofQ==} engines: {node: '>=16'} peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 @@ -1662,20 +1650,20 @@ packages: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 graphql-tag: ^2.0.0 - '@graphql-codegen/typescript-operations@4.3.0': - resolution: {integrity: sha512-ZORwMy8OgsiYd9EZUhTMd4/g5LvTFpx6Fh6dNN0cxFkqSc6KhjX0vhzWsyK8N9+ILaHSutT8UTrLMdJi35HzDQ==} + '@graphql-codegen/typescript-operations@4.4.0': + resolution: {integrity: sha512-oVlos2ySx8xIbbe8r5ZI6mOpI+OTeP14RmS2MchBJ6DL+S9G16O6+9V3Y8V22fTnmBTZkTfAAaBv4HYhhDGWVA==} engines: {node: '>=16'} peerDependencies: graphql: ^0.8.0 || ^0.9.0 
|| ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 - '@graphql-codegen/typescript-resolvers@4.3.0': - resolution: {integrity: sha512-OOib05qdQKzVcjBZpJkcavwEmnxf70QIhSBwqrkGCMcNpFLbytfy0LFAgzn5wmuEeg3RqBceqk/c2QMUvsZcpQ==} + '@graphql-codegen/typescript-resolvers@4.4.1': + resolution: {integrity: sha512-xN/co3NofnHxpOzu5qi2Lc55C0hQZi6jJeV5mn+EnESKZBedGK0yPlaIpsUvieC6DGzGdLFA74wuSgWYULb3LA==} engines: {node: '>=16'} peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 - '@graphql-codegen/typescript@4.1.0': - resolution: {integrity: sha512-/fS53Nh6U6c58GTOxqfyKTLQfQv36P8II/vPw/fg0cdcWbALhRPls69P8vXUWjrElmLKzCrdusBWPp/r+AKUBQ==} + '@graphql-codegen/typescript@4.1.2': + resolution: {integrity: sha512-GhPgfxgWEkBrvKR2y77OThus3K8B6U3ESo68l7+sHH1XiL2WapK5DdClViblJWKQerJRjfJu8tcaxQ8Wpk6Ogw==} engines: {node: '>=16'} peerDependencies: graphql: ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 @@ -1685,18 +1673,28 @@ packages: peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 - '@graphql-codegen/visitor-plugin-common@5.4.0': - resolution: {integrity: sha512-tL7hOrO+4MiNfDiHewhRQCiH9GTAh0M9Y/BZxYGGEdnrfGgqK5pCxtjq7EY/L19VGIyU7hhzYTQ0r1HzEbB4Jw==} + '@graphql-codegen/visitor-plugin-common@5.6.0': + resolution: {integrity: sha512-PowcVPJbUqMC9xTJ/ZRX1p/fsdMZREc+69CM1YY+AlFng2lL0zsdBskFJSRoviQk2Ch9IPhKGyHxlJCy9X22tg==} engines: {node: '>=16'} peerDependencies: graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 + '@graphql-hive/gateway-abort-signal-any@0.0.1': + resolution: {integrity: sha512-H2z8EwwzUf3y8U4ivlP5oCagS/bgom7hqcSr81oC3LQkf6NDKEzLRJ6Zw9aS7wCZcDPRQOwZXgT0P0CZu8pFwQ==} + engines: {node: '>=18.0.0'} + '@graphql-inspector/core@6.1.0': resolution: {integrity: sha512-5/kqD5330duUsfMBfhMc0iVld76JwSKTkKi7aOr1x9MvSnP8p1anQo7BCNZ5VY9+EvWn4njHbkNfdS/lrqsi+A==} 
engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 + '@graphql-inspector/core@6.2.1': + resolution: {integrity: sha512-PxL3fNblfKx/h/B4MIXN1yGHsGdY+uuySz8MAy/ogDk7eU1+va2zDZicLMEBHf7nsKfHWCAN1WFtD1GQP824NQ==} + engines: {node: '>=18.0.0'} + peerDependencies: + graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 + '@graphql-mesh/cache-localforage@0.98.10': resolution: {integrity: sha512-VXY04mA2rvNF9tzzQo5hlGdpCYx0NAXh14VvI738dHFyAFMYWmTFxxYGg8bEoX33WCXWugwznUfKQur8T4qx7w==} engines: {node: '>=16.0.0'} @@ -1726,11 +1724,10 @@ packages: graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/cross-helpers@0.4.7': - resolution: {integrity: sha512-r/mIYVKiFJukPJleytYjCVSklB/5/XWzm2sApcVHUIi+UPXK5AQkdCNMUUIvQvPMlZo+bqVFwUhqPvsOjNxW5w==} + '@graphql-mesh/cross-helpers@0.4.9': + resolution: {integrity: sha512-p0Ds6tfD/icTiCAyul+/5M4Q4iS/MAc1GQyWWwE8Y3fjacvGvQiLUPFV7gDxUbGHJ13GDvibJgOBFw+KTHZfQA==} engines: {node: '>=16.0.0'} peerDependencies: - '@graphql-tools/utils': ^10.5.5 graphql: '*' '@graphql-mesh/fusion-runtime@0.8.14': @@ -1784,13 +1781,13 @@ packages: graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/runtime@0.103.9': - resolution: {integrity: sha512-3mTlJOIqHea5srs3IzB1eMj6gR9gW17ZJF0OlsMkLnbLnU3QcMRuSYvy65E741ppORYysrt07eJMsz4WWk3U1w==} + '@graphql-mesh/runtime@0.103.12': + resolution: {integrity: sha512-rHLo/gYNfbsfBGnl6+TmDz6bcNcTUpED0Wl/s1hcczgGPsNArwDMTqVJiy6LrRV1g1URNes7xq0IApxpwWS3Ag==} engines: {node: '>=16.0.0'} peerDependencies: '@graphql-mesh/cross-helpers': ^0.4.7 - '@graphql-mesh/types': ^0.102.8 - '@graphql-mesh/utils': ^0.102.8 + '@graphql-mesh/types': ^0.102.11 + '@graphql-mesh/utils': ^0.102.11 '@graphql-tools/utils': ^10.5.5 graphql: '*' tslib: ^2.4.0 @@ -1806,6 +1803,12 @@ packages: graphql: '*' tslib: ^2.4.0 + '@graphql-mesh/store@0.103.8': + resolution: {integrity: sha512-uiE2E7Tu74WldpTMV8qH0FOor6CcjOG0rydQNSTAIIQ/QNsz0+yLw0AcybgNygNqqB3iFqjiIod1gOgTlKlhOQ==} + engines: {node: '>=16.0.0'} + peerDependencies: + graphql: '*' + 
'@graphql-mesh/store@0.98.10': resolution: {integrity: sha512-WmiemyZ1aWDQeaDci032fUU+vsM67SX4Ek9NfxLz9X16YsYPr5LXZqamcc6oACB5t67lYEflzLEhkcZcQM31Qw==} engines: {node: '>=16.0.0'} @@ -1817,38 +1820,41 @@ packages: graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/string-interpolation@0.5.6': - resolution: {integrity: sha512-zcBCc68lzNAqiccVhq/lCVtAQi6zHiyu0VdIPdfq48P7wd95J9nBqdJI21caOVKiR5giDpRQL/uMFMzRoQYkRA==} + '@graphql-mesh/string-interpolation@0.5.7': + resolution: {integrity: sha512-LzwlaL1BeGcB63Ay8Z/O/HxW48VngYH5JZlrq4bM4HZ1pfyjDrhjoChOacTgjRBGoOi0ygpK3d0BmXyU4hoThw==} engines: {node: '>=16.0.0'} peerDependencies: graphql: '*' - tslib: ^2.4.0 - '@graphql-mesh/transform-type-merging@0.102.8': - resolution: {integrity: sha512-344zmHasxqnAtvy78VhBYG4W3OmliRIWRyi3roKR1N+9QO9lBD3Fdnmelt2eeVgxskQLwow4B3zZ/EjjpzS7mQ==} + '@graphql-mesh/transform-type-merging@0.102.13': + resolution: {integrity: sha512-xWdURCm1eFShZv2TYv6Jah2QmGlWeyKCASRjdYnS5hi/acwwAgF4nWl+KEL/lXoTWUcGv6zdE+An7nql38ymaw==} engines: {node: '>=16.0.0'} peerDependencies: - '@graphql-mesh/types': ^0.102.8 - '@graphql-mesh/utils': ^0.102.8 + '@graphql-mesh/types': ^0.102.13 + '@graphql-mesh/utils': ^0.102.13 graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/transport-common@0.7.9': - resolution: {integrity: sha512-WgBzihMSxOmz3WArqerZjpy9UCuPPnhomwomrzhoQQjmJcynSg60Ufg9erO7OzO7ANlM6r757pW3QKOJG+7+EA==} + '@graphql-mesh/transport-common@0.7.23': + resolution: {integrity: sha512-mUypw/vnD0t5zLZNlfsyLYRt6MXc5VUMB2Ah4aoKtBeiUcmypfoMcUU/+Nu7l9pWFFI0gWBzeKV+PCXIGLcmwA==} + engines: {node: '>=18.0.0'} + peerDependencies: + graphql: ^15.9.0 || ^16.9.0 + + '@graphql-mesh/types@0.102.13': + resolution: {integrity: sha512-2C/fN2dMd0XyNgZBzpv3F2FMPgtOu/aYZNLL6ne6RfTw5BqeYh7tNVJtO+tloHExmwBCSMeVOh27MQSZLtxq+w==} engines: {node: '>=16.0.0'} peerDependencies: - '@graphql-mesh/types': ^0.102.8 + '@graphql-mesh/store': ^0.102.13 + '@graphql-tools/utils': ^10.5.5 graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/types@0.102.8': - 
resolution: {integrity: sha512-9S1L0NH7pt+ff1fq1cxR2ZeJ1HyG+ZO719kvgG3g0qP9u/diC9Wv/uge30dATfY6ZqPR0cjsez/T7BRGFQOMwA==} + '@graphql-mesh/types@0.103.8': + resolution: {integrity: sha512-cdbAa2ZEYnMq+fgioHfXUEuR5yu+zh5aaFtAbHnld9CDZr+9uQ9Cot0loAUH1b1zVatptBLgzC//KqKiwr6hLA==} engines: {node: '>=16.0.0'} peerDependencies: - '@graphql-mesh/store': ^0.102.8 - '@graphql-tools/utils': ^10.5.5 graphql: '*' - tslib: ^2.4.0 '@graphql-mesh/types@0.98.10': resolution: {integrity: sha512-sVT+1f0Pws92aVgL1Bfnq96FL0wzbM4drEfSgK7qdW7Crq1+Fkp017zz333SMjuw442HiYIf9m3yPXois4bqKg==} @@ -1859,16 +1865,22 @@ packages: graphql: '*' tslib: ^2.4.0 - '@graphql-mesh/utils@0.102.8': - resolution: {integrity: sha512-bGYbtBrmWmsbIZEsLctQBysnSPeEIDDgynL0qQbh/S56/xM4u2VenjmkruSTR4BrF6YrqmWzrdmsWA0Gegd9Pg==} + '@graphql-mesh/utils@0.102.13': + resolution: {integrity: sha512-Bj2t8FbeytZPz5Zce+Gs9p766wUyJP8uWp7uLEls4gGyTnhoGdppyzg6YbhPJFbY34JBwxlac/G7G8sWTtsKjA==} engines: {node: '>=16.0.0'} peerDependencies: '@graphql-mesh/cross-helpers': ^0.4.7 - '@graphql-mesh/types': ^0.102.8 + '@graphql-mesh/types': ^0.102.13 '@graphql-tools/utils': ^10.5.5 graphql: '*' tslib: ^2.4.0 + '@graphql-mesh/utils@0.103.8': + resolution: {integrity: sha512-9Pfi0MikmQQdzQLt14Fu+KPwl91poEN0ue5A9MmbtznTgfqUg35IZY1DWtA/rZcmxFpxOe7zM3tJcCpSvpxJiA==} + engines: {node: '>=16.0.0'} + peerDependencies: + graphql: '*' + '@graphql-mesh/utils@0.98.10': resolution: {integrity: sha512-PfljkWywZhYdCxdhGjpITeeZhIWFFFYBEcdrDp8UkZ6jUsWr4Ykd7pgd6xbRc2O/O/MxjheYKKQoZRUp9xeSSA==} engines: {node: '>=16.0.0'} @@ -1879,86 +1891,86 @@ packages: graphql: '*' tslib: ^2.4.0 - '@graphql-tools/batch-delegate@9.0.8': - resolution: {integrity: sha512-VDxa6O72j6i7GEhuPF3n+4dd5GWjLHUGj09GTSmNhp7z6lPxgXEyBlmi0JhfpLmmemCnTExicL6CSgtP6TlRuQ==} - engines: {node: '>=16.0.0'} + '@graphql-tools/batch-delegate@9.0.24': + resolution: {integrity: sha512-HjJqQcE4AgSUyYOEOiUXugwS6EH0efTOJu2OTYuxhnoAy2Qdo2kzWz0rbTP4RXXSUESWGJ/QuUnCObi5INZEvg==} + engines: {node: 
'>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/batch-execute@9.0.5': - resolution: {integrity: sha512-wkHLqBNtprKuNk+6ZoOw/RthsnGDycIjtOo976K8f0IgbE7fRNO9SnyhjSziHaIWVDjOuP3XaJD5v/i3vQsa5Q==} - engines: {node: '>=16.0.0'} + '@graphql-tools/batch-execute@9.0.10': + resolution: {integrity: sha512-nCRNFq2eqy+ONDknd8DfqidY/Ljgyq67Q0Hb9SMJ3FOWpKrApqmNT9J1BA3JW4r+/zIGtM1VKi+P9FYu3zMHHA==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/code-file-loader@8.1.4': - resolution: {integrity: sha512-vwMk+trCGLidWTmwC5CybqN0+W9fG6VMf61HEggUGBcYLzUmTAIn9DXsU1IFeLRtn8rNx8xH4JpDGd6fv0YWUQ==} + '@graphql-tools/code-file-loader@8.1.9': + resolution: {integrity: sha512-i2Trxb84OvDRFdTb0dURmCFyeNwDpXVZYCLShtjTnGLij4Cjwb7Q4FOWLWgwdFaBvebfRTUVrpl1HX1PxAc7cQ==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/delegate@10.0.26': - resolution: {integrity: sha512-8KaphA86onhO8h9WJeu7YpRNwYDkbbD+KctV6LPJ99vK3w+rQuWkZoxrL1H2nN2FwDBP/9OXposeE7z5C6cv8w==} - engines: {node: '>=16.0.0'} + '@graphql-tools/delegate@10.2.8': + resolution: {integrity: sha512-pUnsfsczDleGwixW18QLXBFGFqaJ12ApHaSZbbwoIqir/kZEl0Oqa9n5VDYxml0glVvK+AjYJzC3gJ+F/refvA==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/executor-graphql-ws@1.3.1': - resolution: {integrity: sha512-UAS5aeWLqv89iJ899OK8uwBMVGVH4nhJDIuIT+8z8f5iPiIpfqt2ipZLasdSLpi5WUpYDIolnVUFd2NvzccO7A==} - engines: {node: '>=16.0.0'} + '@graphql-tools/executor-graphql-ws@1.3.5': + resolution: {integrity: sha512-8BZf9a9SkaJAkF5Byb4ZdiwzCNoTrfl515m206XvCkCHM7dM1AwvX1rYZTrnJWgXgQUxhPjvll5vgciOe1APaA==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/executor-http@1.1.7': - resolution: {integrity: 
sha512-iWTE1MtCW26jxs5DeXsUNPkIFmVWEhioJx0wcDSacJ0onXjyMalfae5SgsuwHMQCVuvvUtQUgb8a9hmPhQ0y+g==} - engines: {node: '>=16.0.0'} + '@graphql-tools/executor-http@1.2.1': + resolution: {integrity: sha512-tBmw6v/hYKS7/oK7gnz7Niqk1YYt3aCwwXRudbsEQTlBBi7b2HMhQzdABX5QSv1XlNBvQ6ey4fqQgJhY4oyPwQ==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/executor-legacy-ws@1.1.1': - resolution: {integrity: sha512-9J5WBd9D7+V299BsMJmgMVBsUl01rqzpfWx+if2r5k9xBYchj5delUOsx337XtNLb3Ewoy0Za24DkNYIx3Cgyg==} + '@graphql-tools/executor-legacy-ws@1.1.6': + resolution: {integrity: sha512-kKP5iVAaaXTjdAHyJFGGtXOjBWKer+IDI4pF9nJuU1FM9bxvZQQtZ0aW8NO/sBZlJus+bbLHzVgywkP4Y8E01w==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/executor@1.3.2': - resolution: {integrity: sha512-U8nAR709IPNjwf0aLG6U9FlX0t7vA4cdWvL4RtMR/L/Ll4OHZ39OqUtq6moy+kLRRwLTqLif6iiUYrxnWpUGXw==} + '@graphql-tools/executor@1.3.8': + resolution: {integrity: sha512-tiZ8/PaQ+wkdZeCSyHa7vOUqCJndnpnN5ilUpi5UwsFrFFyN71sr4NJeib7Txf1VdufJNB4ed/0yFd39O0L3AQ==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/federation@2.2.17': - resolution: {integrity: sha512-PCyXWKYRNJFAN+hu//wUg/iOtLHqa0hX5kn0CVfMuB0Kj//nue2nQRmY8gknlopkz+xuuG0aVLIo5bkYGGPxgw==} - engines: {node: '>=16.0.0'} + '@graphql-tools/federation@2.2.40': + resolution: {integrity: sha512-2gCoFQptXJb/+hz2tG75WNRpS9+1jMyfRU/UUjGlcg/ysHcjyPB6KfUzc2/9JPdY6B3ocQunqJaF2vueiEKpJg==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/graphql-file-loader@8.0.2': - resolution: {integrity: sha512-uf/vkO7jIU19hOZKL/DPyE5vm3wH7nFpfNYrMGGx8XlDK7l0al/MO7HQy+4YUPENkAd8FBgRNt2Ilm1fUXCwJg==} + '@graphql-tools/graphql-file-loader@8.0.7': + resolution: {integrity: 
sha512-WZD4bMbI/WhRY4dUQNzTKAr15M2Ayr1bhxacejp4dhYzZq7WUGZw1CfPnphYhptkAUZWdaydhJmq7CnIYJO2sA==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/graphql-tag-pluck@8.3.3': - resolution: {integrity: sha512-G+8UNUa54ct/f9hNHo7Ez61BeAoaeXYhtfq8rYu0m9Upr/BCgsQmuvEgyHBRSFVkqOQj56H5aBwKW68SPrrU8g==} + '@graphql-tools/graphql-tag-pluck@8.3.8': + resolution: {integrity: sha512-qo6dhmOFsEoiUI9EDNVd2GbOCkshHLPpofdCpHZ3QfRvpwa1/tNlHqMjbZ7gGTQSGKqngi9LZoQp4D6nXvxEpg==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/import@7.0.2': - resolution: {integrity: sha512-7OpShcq/yRwCcMcTyLNIonYw9l1yD+Im/znN/l9SRsThYGhMlojEHIntn7f9IArCnHR71uZk5UQioGLUTG6E6A==} + '@graphql-tools/import@7.0.7': + resolution: {integrity: sha512-UAlN3zlKLYmzBZoUtL64TwzrxFzGJDBO3Ut6cz7eGa7V2m6b4rm9U/8ospz9TrXOtqX9TCawyTh93fPr2bEfvQ==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/load@8.0.3': - resolution: {integrity: sha512-JE/MdTMcaIQ68U9zaizXG3QkR4Qligv131JVVmVJScxA1gv0gIc+HDixa5YK1rBXYLANU1sZMk87ZVuPaUdAoQ==} + '@graphql-tools/load@8.0.8': + resolution: {integrity: sha512-5B96YiUs4i6UyzVHZJFSP4k8PCj0LS5P5KnZ2HIoSHyMpbzoWueTIlZ4sEorQwBjHkQj6TEoXIPfjivxwPNr7Q==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/merge@9.0.8': - resolution: {integrity: sha512-RG9NEp4fi0MoFi0te4ahqTMYuavQnXlpEZxxMomdCa6CI5tfekcVm/rsLF5Zt8O4HY+esDt9+4dCL+aOKvG79w==} + '@graphql-tools/merge@9.0.13': + resolution: {integrity: sha512-OSEOaFOjdkAwR6umRHrTrKjYANbh/0OBb1W8B21dxu8XPaOeoCuShDGXY6ZpragiO8Ke0qFXZGwJGg8ZbDPfvQ==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 @@ -1979,44 +1991,44 @@ packages: peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/relay-operation-optimizer@7.0.2': - 
resolution: {integrity: sha512-sdoGBfe6+OXcPYUBMla3KKvf56bk0wCRY2HL4qK/CNP+7752Nx6s24aBqZ5vrnB3tleddAfnG4gvy0JuHfmA+A==} + '@graphql-tools/relay-operation-optimizer@7.0.7': + resolution: {integrity: sha512-8hWMbcRn7gjLFhp9GsCuYqtL8qIt/bwTnK1MynuQWz0NetvVVNCUckILy/5vk4qFoJIobtNJd7XW6Xvv4TN+Ug==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/schema@10.0.4': - resolution: {integrity: sha512-HuIwqbKxPaJujox25Ra4qwz0uQzlpsaBOzO6CVfzB/MemZdd+Gib8AIvfhQArK0YIN40aDran/yi+E5Xf0mQww==} + '@graphql-tools/schema@10.0.12': + resolution: {integrity: sha512-ukIZBdD4jI94ren5GK6nnHe+YvDVOfoI8cz50pdE1+FYf9NSFUu7HJXmIBHGIIWFbE5lz4qb5MfUeuBkffs3lw==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/schema@10.0.7': - resolution: {integrity: sha512-Cz1o+rf9cd3uMgG+zI9HlM5mPlnHQUlk/UQRZyUlPDfT+944taLaokjvj7AI6GcOFVf4f2D11XthQp+0GY31jQ==} + '@graphql-tools/schema@10.0.4': + resolution: {integrity: sha512-HuIwqbKxPaJujox25Ra4qwz0uQzlpsaBOzO6CVfzB/MemZdd+Gib8AIvfhQArK0YIN40aDran/yi+E5Xf0mQww==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/stitch@9.2.15': - resolution: {integrity: sha512-NOnmymAaXxH0ZM3t1dP9MyPsOsmKGiCayBUjjn4Ej0TgUgZoSxjn78KQaXWcQ/FwxXPLmRdJ7bpwXe3aYzG/rA==} - engines: {node: '>=16.0.0'} + '@graphql-tools/stitch@9.4.10': + resolution: {integrity: sha512-RoFtwuCpBiIfAv4naIC4dL8paErx+Cymd2hjsEEIOTgipRU7bSST04b6JtHsOrmKuCtgp+aYj3cldN1cmSGuhQ==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/stitching-directives@3.1.7': - resolution: {integrity: sha512-SVnQeU9IlOAceNyR+7TRlNVaqeksRwkOYoCz0lx4Wds2L9K+u4cPeMpPVCXVuJ1Qcl1fgWpfQmBJZLzyVVRVCA==} - engines: {node: '>=16.0.0'} + '@graphql-tools/stitching-directives@3.1.23': + resolution: {integrity: 
sha512-bWhawN/6DkuG2aZ3qITuRJzRG90SQXZXUMA4PkyKFo2hEhbH4aSlrLIhV/s+qMWoKoeBmpnV4D9SCMIkaQaEaw==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/url-loader@8.0.7': - resolution: {integrity: sha512-f1mq1wb1ivn8qFDVm8GWO5Co6Y/NZVXHgEG+3rjntr7aXjnw+DXyDQ+7QJRWJRDJcP0YWLJgfrBcWo1CqI4Qow==} + '@graphql-tools/url-loader@8.0.19': + resolution: {integrity: sha512-D1AhYr75ho/A+bvxSH3+SUpGRcpYmWFBZIEe0TahkHXUBX1uVUDpUnGP7zz525tBMG9/CJhZcXqlDANqmsCm7Q==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/utils@10.5.5': - resolution: {integrity: sha512-LF/UDWmMT0mnobL2UZETwYghV7HYBzNaGj0SAkCYOMy/C3+6sQdbcTksnoFaKR9XIVD78jNXEGfivbB8Zd+cwA==} + '@graphql-tools/utils@10.6.3': + resolution: {integrity: sha512-hEaQTGyQUG3DJqCaIsiu4M+jUgWUf+h6kDwC8MtGElwkL1HWi+qX2qyynw8h9WoV7STmmHDSwkk2ET1IC3nRPw==} engines: {node: '>=16.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 @@ -2031,9 +2043,9 @@ packages: peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 - '@graphql-tools/wrap@10.0.10': - resolution: {integrity: sha512-3f1CUM+EpsALjt/HofzSWCLyfY65o9VpmqCTvIwVWGOnaP82cWbZE1Ytwb+t7yAZBKqCCc+1ginp+COIPD3ULw==} - engines: {node: '>=16.0.0'} + '@graphql-tools/wrap@10.0.26': + resolution: {integrity: sha512-vCeM30vm5gtTswg1Tebn0bSBrn74axlqmu9kDrPwlqjum5ykZQjkSwuCXcGuBS/4pNhmaTirXLuUL1vP5FvEHA==} + engines: {node: '>=18.0.0'} peerDependencies: graphql: ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 @@ -2046,20 +2058,19 @@ packages: resolution: {integrity: sha512-Mg8psdkAp+YTG1OGmvU+xa6xpsAmSir0hhr3yFYPyLNwzUj95DdIwsMpKadDj9xDpYgJcH3Hp/4JMal9DhQimA==} engines: {node: '>=18.0.0'} - '@graphql-yoga/plugin-persisted-operations@3.7.0': - resolution: {integrity: sha512-jfr+rIZBXbFxUO3j87u+VWWwyzIEn4SqSogRexyTFGhFb8hott8UALf/jOBEtZUIyHzsO3O1TN1pG+TPbQFkHA==} + '@graphql-yoga/plugin-persisted-operations@3.10.5': + resolution: {integrity: 
sha512-a9pHMolJSYo3ONEkpKkgTI5eh64uwxc+fNXaUrntN72dTiiimRdF7LSatVTy5BRE0MeUpJe6mdnKH93GUjo0hw==} engines: {node: '>=18.0.0'} peerDependencies: - '@graphql-tools/utils': ^10.0.0 graphql: ^15.2.0 || ^16.0.0 - graphql-yoga: ^5.7.0 + graphql-yoga: ^5.10.5 - '@graphql-yoga/subscription@5.0.1': - resolution: {integrity: sha512-1wCB1DfAnaLzS+IdoOzELGGnx1ODEg9nzQXFh4u2j02vAnne6d+v4A7HIH9EqzVdPLoAaMKXCZUUdKs+j3z1fg==} + '@graphql-yoga/subscription@5.0.2': + resolution: {integrity: sha512-KGacW1FtUXR5e3qk4YmEFQRGTov8lOkpW7syjTD3EN2t5HRWrSsut2LwjVdK+HcP3H9UEuZ9RXw/+shqV+1exQ==} engines: {node: '>=18.0.0'} - '@graphql-yoga/typed-event-target@3.0.0': - resolution: {integrity: sha512-w+liuBySifrstuHbFrHoHAEyVnDFVib+073q8AeAJ/qqJfvFvAwUPLLtNohR/WDVRgSasfXtl3dcNuVJWN+rjg==} + '@graphql-yoga/typed-event-target@3.0.1': + resolution: {integrity: sha512-SWVkyFivzlDqGTBrGTWTNg+aFGP/cIiotirUFnvwuUGt2gla6UJoKhII6aPoHNg3/5vpUAL1KzyoaXMK2PO0JA==} engines: {node: '>=18.0.0'} '@hasparus/eslint-plugin@1.0.0': @@ -2072,12 +2083,12 @@ packages: react: ^16 || ^17 || ^18 react-dom: ^16 || ^17 || ^18 - '@headlessui/react@2.1.10': - resolution: {integrity: sha512-6mLa2fjMDAFQi+/R10B+zU3edsUk/MDtENB2zHho0lqKU1uzhAfJLUduWds4nCo8wbl3vULtC5rJfZAQ1yqIng==} + '@headlessui/react@2.2.0': + resolution: {integrity: sha512-RzCEg+LXsuI7mHiSomsu/gBJSjpupm6A1qIZ5sWjd7JhARNlMiSA4kKfJpCKwU9tE+zMRterhhrP74PvfJrpXQ==} engines: {node: '>=10'} peerDependencies: - react: ^18 - react-dom: ^18 + react: ^18 || ^19 || ^19.0.0-rc + react-dom: ^18 || ^19 || ^19.0.0-rc '@humanwhocodes/config-array@0.13.0': resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} @@ -2092,17 +2103,17 @@ packages: resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} deprecated: Use @eslint/object-schema instead - '@internationalized/date@3.5.6': - resolution: {integrity: 
sha512-jLxQjefH9VI5P9UQuqB6qNKnvFt1Ky1TPIzHGsIlCi7sZZoMR8SdYbBGRvM0y+Jtb+ez4ieBzmiAUcpmPYpyOw==} + '@internationalized/date@3.6.0': + resolution: {integrity: sha512-+z6ti+CcJnRlLHok/emGEsWQhe7kfSmEW+/6qCzvKY67YPh7YOBfvc7+/+NXq+zJlbArg30tYpqLjNgcAYv2YQ==} - '@internationalized/message@3.1.5': - resolution: {integrity: sha512-hjEpLKFlYA3m5apldLqzHqw531qqfOEq0HlTWdfyZmcloWiUbWsYXD6YTiUmQmOtarthzhdjCAwMVrB8a4E7uA==} + '@internationalized/message@3.1.6': + resolution: {integrity: sha512-JxbK3iAcTIeNr1p0WIFg/wQJjIzJt9l/2KNY/48vXV7GRGZSv3zMxJsce008fZclk2cDC8y0Ig3odceHO7EfNQ==} - '@internationalized/number@3.5.4': - resolution: {integrity: sha512-h9huwWjNqYyE2FXZZewWqmCdkw1HeFds5q4Siuoms3hUQC5iPJK3aBmkFZoDSLN4UD0Bl8G22L/NdHpeOr+/7A==} + '@internationalized/number@3.6.0': + resolution: {integrity: sha512-PtrRcJVy7nw++wn4W2OuePQQfTqDzfusSuY1QTtui4wa7r+rGVtR75pO8CyKvHvzyQYi3Q1uO5sY0AsB4e65Bw==} - '@internationalized/string@3.2.4': - resolution: {integrity: sha512-BcyadXPn89Ae190QGZGDUZPqxLj/xsP4U1Br1oSy8yfIjmpJ8cJtGYleaodqW/EmzFjwELtwDojLkf3FhV6SjA==} + '@internationalized/string@3.2.5': + resolution: {integrity: sha512-rKs71Zvl2OKOHM+mzAFMIyqR5hI1d1O6BBkMK2/lkfg3fkmVh9Eeg0awcA8W2WqYqDOv6a86DIOlFpggwLtbuw==} '@isaacs/cliui@8.0.2': resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} @@ -2112,8 +2123,8 @@ packages: resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jridgewell/gen-mapping@0.3.5': - resolution: {integrity: sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==} + '@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} engines: {node: '>=6.0.0'} '@jridgewell/resolve-uri@3.1.2': @@ -2243,62 +2254,62 @@ packages: '@next/env@13.5.7': 
resolution: {integrity: sha512-uVuRqoj28Ys/AI/5gVEgRAISd0KWI0HRjOO1CTpNgmX3ZsHb5mdn14Y59yk0IxizXdo7ZjsI2S7qbWnO+GNBcA==} - '@next/env@14.2.18': - resolution: {integrity: sha512-2vWLOUwIPgoqMJKG6dt35fVXVhgM09tw4tK3/Q34GFXDrfiHlG7iS33VA4ggnjWxjiz9KV5xzfsQzJX6vGAekA==} + '@next/env@14.2.20': + resolution: {integrity: sha512-JfDpuOCB0UBKlEgEy/H6qcBSzHimn/YWjUHzKl1jMeUO+QVRdzmTTl8gFJaNO87c8DXmVKhFCtwxQ9acqB3+Pw==} '@next/eslint-plugin-next@13.4.9': resolution: {integrity: sha512-nDtGpa992tNyAkT/KmSMy7QkHfNZmGCBYhHtafU97DubqxzNdvLsqRtliQ4FU04CysRCtvP2hg8rRC1sAKUTUA==} - '@next/swc-darwin-arm64@14.2.18': - resolution: {integrity: sha512-tOBlDHCjGdyLf0ube/rDUs6VtwNOajaWV+5FV/ajPgrvHeisllEdymY/oDgv2cx561+gJksfMUtqf8crug7sbA==} + '@next/swc-darwin-arm64@14.2.20': + resolution: {integrity: sha512-WDfq7bmROa5cIlk6ZNonNdVhKmbCv38XteVFYsxea1vDJt3SnYGgxLGMTXQNfs5OkFvAhmfKKrwe7Y0Hs+rWOg==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@14.2.18': - resolution: {integrity: sha512-uJCEjutt5VeJ30jjrHV1VIHCsbMYnEqytQgvREx+DjURd/fmKy15NaVK4aR/u98S1LGTnjq35lRTnRyygglxoA==} + '@next/swc-darwin-x64@14.2.20': + resolution: {integrity: sha512-XIQlC+NAmJPfa2hruLvr1H1QJJeqOTDV+v7tl/jIdoFvqhoihvSNykLU/G6NMgoeo+e/H7p/VeWSOvMUHKtTIg==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@14.2.18': - resolution: {integrity: sha512-IL6rU8vnBB+BAm6YSWZewc+qvdL1EaA+VhLQ6tlUc0xp+kkdxQrVqAnh8Zek1ccKHlTDFRyAft0e60gteYmQ4A==} + '@next/swc-linux-arm64-gnu@14.2.20': + resolution: {integrity: sha512-pnzBrHTPXIMm5QX3QC8XeMkpVuoAYOmyfsO4VlPn+0NrHraNuWjdhe+3xLq01xR++iCvX+uoeZmJDKcOxI201Q==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@14.2.18': - resolution: {integrity: sha512-RCaENbIZqKKqTlL8KNd+AZV/yAdCsovblOpYFp0OJ7ZxgLNbV5w23CUU1G5On+0fgafrsGcW+GdMKdFjaRwyYA==} + '@next/swc-linux-arm64-musl@14.2.20': + resolution: {integrity: 
sha512-WhJJAFpi6yqmUx1momewSdcm/iRXFQS0HU2qlUGlGE/+98eu7JWLD5AAaP/tkK1mudS/rH2f9E3WCEF2iYDydQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-x64-gnu@14.2.18': - resolution: {integrity: sha512-3kmv8DlyhPRCEBM1Vavn8NjyXtMeQ49ID0Olr/Sut7pgzaQTo4h01S7Z8YNE0VtbowyuAL26ibcz0ka6xCTH5g==} + '@next/swc-linux-x64-gnu@14.2.20': + resolution: {integrity: sha512-ao5HCbw9+iG1Kxm8XsGa3X174Ahn17mSYBQlY6VGsdsYDAbz/ZP13wSLfvlYoIDn1Ger6uYA+yt/3Y9KTIupRg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@14.2.18': - resolution: {integrity: sha512-mliTfa8seVSpTbVEcKEXGjC18+TDII8ykW4a36au97spm9XMPqQTpdGPNBJ9RySSFw9/hLuaCMByluQIAnkzlw==} + '@next/swc-linux-x64-musl@14.2.20': + resolution: {integrity: sha512-CXm/kpnltKTT7945np6Td3w7shj/92TMRPyI/VvveFe8+YE+/YOJ5hyAWK5rpx711XO1jBCgXl211TWaxOtkaA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-win32-arm64-msvc@14.2.18': - resolution: {integrity: sha512-J5g0UFPbAjKYmqS3Cy7l2fetFmWMY9Oao32eUsBPYohts26BdrMUyfCJnZFQkX9npYaHNDOWqZ6uV9hSDPw9NA==} + '@next/swc-win32-arm64-msvc@14.2.20': + resolution: {integrity: sha512-upJn2HGQgKNDbXVfIgmqT2BN8f3z/mX8ddoyi1I565FHbfowVK5pnMEwauvLvaJf4iijvuKq3kw/b6E9oIVRWA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-ia32-msvc@14.2.18': - resolution: {integrity: sha512-Ynxuk4ZgIpdcN7d16ivJdjsDG1+3hTvK24Pp8DiDmIa2+A4CfhJSEHHVndCHok6rnLUzAZD+/UOKESQgTsAZGg==} + '@next/swc-win32-ia32-msvc@14.2.20': + resolution: {integrity: sha512-igQW/JWciTGJwj3G1ipalD2V20Xfx3ywQy17IV0ciOUBbFhNfyU1DILWsTi32c8KmqgIDviUEulW/yPb2FF90w==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] - '@next/swc-win32-x64-msvc@14.2.18': - resolution: {integrity: sha512-dtRGMhiU9TN5nyhwzce+7c/4CCeykYS+ipY/4mIrGzJ71+7zNo55ZxCB7cAVuNqdwtYniFNR2c9OFQ6UdFIMcg==} + '@next/swc-win32-x64-msvc@14.2.20': + resolution: {integrity: sha512-AFmqeLW6LtxeFTuoB+MXFeM5fm5052i3MU6xD0WzJDOwku6SkZaxb1bxjBaRC8uNqTRTSPl0yMFtjNowIVI67w==} engines: {node: '>= 10'} cpu: [x64] os: 
[win32] @@ -2444,6 +2455,9 @@ packages: react: '>= 16.8' react-dom: '>= 16.8' + '@pinax/graph-networks-registry@0.6.5': + resolution: {integrity: sha512-Urm/C+phjJLP+W5OF7hCUMrlSqSNGfX8V2BmzXmVkME/NX1yBZCQinR3Zk2L2uedpOBjmp3o7vm7bdQy+DMHhA==} + '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} @@ -2455,11 +2469,11 @@ packages: '@radix-ui/number@1.1.0': resolution: {integrity: sha512-V3gRzhVNU1ldS5XhAPTom1fOIo4ccrjjJgmE+LI2h/WaFpHmx0MQApT+KZHnx8abG6Avtfcz4WoEciMnpFT3HQ==} - '@radix-ui/primitive@1.1.0': - resolution: {integrity: sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA==} + '@radix-ui/primitive@1.1.1': + resolution: {integrity: sha512-SJ31y+Q/zAyShtXJc8x83i9TYdbAfHZ++tUZnvjJJqFjzsdUnKsxPL6IEtBlxKkU7yzer//GQtZSV4GbldL3YA==} - '@radix-ui/react-accordion@1.2.1': - resolution: {integrity: sha512-bg/l7l5QzUjgsh8kjwDFommzAshnUsuVMV5NM56QVCm+7ZckYdd9P/ExR8xG/Oup0OajVxNLaHJ1tb8mXk+nzQ==} + '@radix-ui/react-accordion@1.2.2': + resolution: {integrity: sha512-b1oh54x4DMCdGsB4/7ahiSrViXxaBwRPotiZNnYXjLha9vfuURSAZErki6qjDoSIV0eXx5v57XnTGVtGwnfp2g==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2471,8 +2485,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-alert-dialog@1.1.2': - resolution: {integrity: sha512-eGSlLzPhKO+TErxkiGcCZGuvbVMnLA1MTnyBksGOeGRGkxHiiJUujsjmNTdWTm4iHVSRaUao9/4Ur671auMghQ==} + '@radix-ui/react-alert-dialog@1.1.3': + resolution: {integrity: sha512-5xzWppXTNZe6zFrTTwAJIoMJeZmdFe0l8ZqQrPGKAVvhdyOWR4r53/G7SZqx6/uf1J441oxK7GzmTkrrWDroHA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2484,8 +2498,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-arrow@1.1.0': - resolution: {integrity: sha512-FmlW1rCg7hBpEBwFbjHwCW6AmWLQM6g/v0Sn8XbP9NvmSZ2San1FpQeyPtufzOMSIx7Y4dzjlHoifhp+7NkZhw==} + '@radix-ui/react-arrow@1.1.1': 
+ resolution: {integrity: sha512-NaVpZfmv8SKeZbn4ijN2V3jlHA9ngBG16VnIIm22nUR0Yk8KUALyBxT3KYEUnNuch9sTE8UTsS3whzBgKOL30w==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2497,8 +2511,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-collapsible@1.1.1': - resolution: {integrity: sha512-1///SnrfQHJEofLokyczERxQbWfCGQlQ2XsCZMucVs6it+lq9iw4vXy+uDn1edlb58cOZOWSldnfPAYcT4O/Yg==} + '@radix-ui/react-collapsible@1.1.2': + resolution: {integrity: sha512-PliMB63vxz7vggcyq0IxNYk8vGDrLXVWw4+W4B8YnwI1s18x7YZYqlG9PLX7XxAJUi0g2DxP4XKJMFHh/iVh9A==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2510,8 +2524,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-collection@1.1.0': - resolution: {integrity: sha512-GZsZslMJEyo1VKm5L1ZJY8tGDxZNPAoUeQUIbKeJfoi7Q4kmig5AsgLMYYuyYbfjd8fBmFORAIwYAkXMnXZgZw==} + '@radix-ui/react-collection@1.1.1': + resolution: {integrity: sha512-LwT3pSho9Dljg+wY2KN2mrrh6y3qELfftINERIzBUO9e0N+t0oMTyn3k9iv+ZqgrwGkRnLpNJrsMv9BZlt2yuA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2523,17 +2537,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-compose-refs@1.1.0': - resolution: {integrity: sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==} - peerDependencies: - '@types/react': '*' - react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc - peerDependenciesMeta: - '@types/react': - optional: true - - '@radix-ui/react-context@1.1.0': - resolution: {integrity: sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==} + '@radix-ui/react-compose-refs@1.1.1': + resolution: {integrity: sha512-Y9VzoRDSJtgFMUCoiZBDVo084VQ5hfpXxVE+NgkdNsjiDBByiImMZKKhxMwCbdHvhlENG6a833CbFkOQvTricw==} peerDependencies: '@types/react': '*' react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc @@ -2550,8 +2555,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dialog@1.1.2': - 
resolution: {integrity: sha512-Yj4dZtqa2o+kG61fzB0H2qUvmwBA2oyQroGLyNtBj1beo1khoQ3q1a2AO8rrQYjd8256CO9+N8L9tvsS+bnIyA==} + '@radix-ui/react-dialog@1.1.3': + resolution: {integrity: sha512-ujGvqQNkZ0J7caQyl8XuZRj2/TIrYcOGwqz5TeD1OMcCdfBuEMP0D12ve+8J5F9XuNUth3FAKFWo/wt0E/GJrQ==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2572,8 +2577,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dismissable-layer@1.1.1': - resolution: {integrity: sha512-QSxg29lfr/xcev6kSz7MAlmDnzbP1eI/Dwn3Tp1ip0KT5CUELsxkekFEMVBEoykI3oV39hKT4TKZzBNMbcTZYQ==} + '@radix-ui/react-dismissable-layer@1.1.2': + resolution: {integrity: sha512-kEHnlhv7wUggvhuJPkyw4qspXLJOdYoAP4dO2c8ngGuXTq1w/HZp1YeVB+NQ2KbH1iEG+pvOCGYSqh9HZOz6hg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2585,8 +2590,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-dropdown-menu@2.1.2': - resolution: {integrity: sha512-GVZMR+eqK8/Kes0a36Qrv+i20bAPXSn8rCBTHx30w+3ECnR5o3xixAlqcVaYvLeyKUsm0aqyhWfmUcqufM8nYA==} + '@radix-ui/react-dropdown-menu@2.1.3': + resolution: {integrity: sha512-eKyAfA9e4HOavzyGJC6kiDIlHMPzAU0zqSqTg+VwS0Okvb9nkTo7L4TugkCUqM3I06ciSpdtYQ73cgB7tyUgVw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2607,8 +2612,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-focus-scope@1.1.0': - resolution: {integrity: sha512-200UD8zylvEyL8Bx+z76RJnASR2gRMuxlgFCPAe/Q/679a/r0eK3MBVYMb7vZODZcffZBdob1EGnky78xmVvcA==} + '@radix-ui/react-focus-scope@1.1.1': + resolution: {integrity: sha512-01omzJAYRxXdG2/he/+xy+c8a8gCydoQ1yOxnWNcRhrrBW5W+RQJ22EK1SaO8tb3WoUsuEw7mJjBozPzihDFjA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2629,8 +2634,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-label@2.1.0': - resolution: {integrity: sha512-peLblDlFw/ngk3UWq0VnYaOLy6agTZZ+MUO/WhVfm14vJGML+xH4FAl2XQGLqdefjNb7ApRg6Yn7U42ZhmYXdw==} + '@radix-ui/react-label@2.1.1': + resolution: {integrity: 
sha512-UUw5E4e/2+4kFMH7+YxORXGWggtY6sM8WIwh5RZchhLuUg2H1hc98Py+pr8HMz6rdaYrK2t296ZEjYLOCO5uUw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2642,8 +2647,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-menu@2.1.2': - resolution: {integrity: sha512-lZ0R4qR2Al6fZ4yCCZzu/ReTFrylHFxIqy7OezIpWF4bL0o9biKo0pFIvkaew3TyZ9Fy5gYVrR5zCGZBVbO1zg==} + '@radix-ui/react-menu@2.1.3': + resolution: {integrity: sha512-wY5SY6yCiJYP+DMIy7RrjF4shoFpB9LJltliVwejBm8T2yepWDJgKBhIFYOGWYR/lFHOCtbstN9duZFu6gmveQ==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2655,8 +2660,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-navigation-menu@1.2.1': - resolution: {integrity: sha512-egDo0yJD2IK8L17gC82vptkvW1jLeni1VuqCyzY727dSJdk5cDjINomouLoNk8RVF7g2aNIfENKWL4UzeU9c8Q==} + '@radix-ui/react-navigation-menu@1.2.2': + resolution: {integrity: sha512-7wHxgyNzOjsexOHFTXGJK/RDhKgrqj0siWJpm5i+sb7h+A6auY7efph6eMg0kOU4sVCLcbhHK7ZVueAXxOzvZA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2668,8 +2673,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-popover@1.1.2': - resolution: {integrity: sha512-u2HRUyWW+lOiA2g0Le0tMmT55FGOEWHwPFt1EPfbLly7uXQExFo5duNKqG2DzmFXIdqOeNd+TpE8baHWJCyP9w==} + '@radix-ui/react-popover@1.1.3': + resolution: {integrity: sha512-MBDKFwRe6fi0LT8m/Jl4V8J3WbS/UfXJtsgg8Ym5w5AyPG3XfHH4zhBp1P8HmZK83T8J7UzVm6/JpDE3WMl1Dw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2681,8 +2686,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-popper@1.2.0': - resolution: {integrity: sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==} + '@radix-ui/react-popper@1.2.1': + resolution: {integrity: sha512-3kn5Me69L+jv82EKRuQCXdYyf1DqHwD2U/sxoNgBGCB7K9TRc3bQamQ+5EPM9EvyPdli0W41sROd+ZU1dTCztw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2694,8 +2699,8 @@ packages: '@types/react-dom': 
optional: true - '@radix-ui/react-portal@1.1.2': - resolution: {integrity: sha512-WeDYLGPxJb/5EGBoedyJbT0MpoULmwnIPMJMSldkuiMsBAv7N1cRdsTWZWht9vpPOiN3qyiGAtbK2is47/uMFg==} + '@radix-ui/react-portal@1.1.3': + resolution: {integrity: sha512-NciRqhXnGojhT93RPyDaMPfLH3ZSl4jjIFbZQ1b/vxvZEdHsBZ49wP9w8L3HzUQwep01LcWtkUvm0OVB5JAHTw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2707,8 +2712,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-presence@1.1.1': - resolution: {integrity: sha512-IeFXVi4YS1K0wVZzXNrbaaUvIJ3qdY+/Ih4eHFhWA9SwGR9UDX7Ck8abvL57C4cv3wwMvUE0OG69Qc3NCcTe/A==} + '@radix-ui/react-presence@1.1.2': + resolution: {integrity: sha512-18TFr80t5EVgL9x1SwF/YGtfG+l0BS0PRAlCWBDoBEiDQjeKgnNZRVJp/oVBl24sr3Gbfwc/Qpj4OcWTQMsAEg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2720,8 +2725,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-primitive@2.0.0': - resolution: {integrity: sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==} + '@radix-ui/react-primitive@2.0.1': + resolution: {integrity: sha512-sHCWTtxwNn3L3fH8qAfnF3WbUZycW93SM1j3NFDzXBiz8D6F5UTTy8G1+WFEaiCdvCVRJWj6N2R4Xq6HdiHmDg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2733,8 +2738,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-roving-focus@1.1.0': - resolution: {integrity: sha512-EA6AMGeq9AEeQDeSH0aZgG198qkfHSbvWTf1HvoDmOB5bBG/qTxjYMWUKMnYiV6J/iP/J8MEFSuB2zRU2n7ODA==} + '@radix-ui/react-roving-focus@1.1.1': + resolution: {integrity: sha512-QE1RoxPGJ/Nm8Qmk0PxP8ojmoaS67i0s7hVssS7KuI2FQoc/uzVlZsqKfQvxPE6D8hICCPHJ4D88zNhT3OOmkw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2746,8 +2751,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-slider@1.2.1': - resolution: {integrity: sha512-bEzQoDW0XP+h/oGbutF5VMWJPAl/UU8IJjr7h02SOHDIIIxq+cep8nItVNoBV+OMmahCdqdF38FTpmXoqQUGvw==} + '@radix-ui/react-slider@1.2.2': 
+ resolution: {integrity: sha512-sNlU06ii1/ZcbHf8I9En54ZPW0Vil/yPVg4vQMcFNjrIx51jsHbFl1HYHQvCIWJSr1q0ZmA+iIs/ZTv8h7HHSA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2759,8 +2764,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-slot@1.1.0': - resolution: {integrity: sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==} + '@radix-ui/react-slot@1.1.1': + resolution: {integrity: sha512-RApLLOcINYJA+dMVbOju7MYv1Mb2EBp2nH4HdDzXTSyaR5optlm6Otrz1euW3HbdOR8UmmFK06TD+A9frYWv+g==} peerDependencies: '@types/react': '*' react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc @@ -2768,8 +2773,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-switch@1.1.1': - resolution: {integrity: sha512-diPqDDoBcZPSicYoMWdWx+bCPuTRH4QSp9J+65IvtdS0Kuzt67bI6n32vCj8q6NZmYW/ah+2orOtMwcX5eQwIg==} + '@radix-ui/react-switch@1.1.2': + resolution: {integrity: sha512-zGukiWHjEdBCRyXvKR6iXAQG6qXm2esuAD6kDOi9Cn+1X6ev3ASo4+CsYaD6Fov9r/AQFekqnD/7+V0Cs6/98g==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2781,8 +2786,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-toast@1.2.2': - resolution: {integrity: sha512-Z6pqSzmAP/bFJoqMAston4eSNa+ud44NSZTiZUmUen+IOZ5nBY8kzuU5WDBVyFXPtcW6yUalOHsxM/BP6Sv8ww==} + '@radix-ui/react-toast@1.2.3': + resolution: {integrity: sha512-oB8irs7CGAml6zWbum7MNySTH/sR7PM1ZQyLV8reO946u73sU83yZUKijrMLNbm4hTOrJY4tE8Oa/XUKrOr2Wg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2794,8 +2799,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-tooltip@1.1.3': - resolution: {integrity: sha512-Z4w1FIS0BqVFI2c1jZvb/uDVJijJjJ2ZMuPV81oVgTZ7g3BZxobplnMVvXtFWgtozdvYJ+MFWtwkM5S2HnAong==} + '@radix-ui/react-tooltip@1.1.5': + resolution: {integrity: sha512-IucoQPcK5nwUuztaxBQvudvYwH58wtRcJlv1qvaMSyIbL9dEBfFN0vRf/D8xDbu6HmAJLlNGty4z8Na+vIqe9Q==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2870,8 +2875,8 
@@ packages: '@types/react': optional: true - '@radix-ui/react-visually-hidden@1.1.0': - resolution: {integrity: sha512-N8MDZqtgCgG5S3aV60INAB475osJousYpZ4cTJ2cFbMpdHS5Y6loLTH8LPtkj2QN0x93J30HT/M3qJXM0+lyeQ==} + '@radix-ui/react-visually-hidden@1.1.1': + resolution: {integrity: sha512-vVfA2IZ9q/J+gEamvj761Oq1FpWgCDaNOOIfbPVp2MVPLEomUr5+Vf7kJGwQ24YxZSlQVar7Bes8kyTo5Dshpg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2886,480 +2891,496 @@ packages: '@radix-ui/rect@1.1.0': resolution: {integrity: sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==} - '@react-aria/breadcrumbs@3.5.18': - resolution: {integrity: sha512-JRc6nAwQsjqsPw/3MlGwJcVo9ACZDbCOwWNNEnj8mR0fQopJO5xliq3qVzxDRZjdYrVUfTTyKXuepv/jMB1Y6Q==} + '@react-aria/breadcrumbs@3.5.19': + resolution: {integrity: sha512-mVngOPFYVVhec89rf/CiYQGTfaLRfHFtX+JQwY7sNYNqSA+gO8p4lNARe3Be6bJPgH+LUQuruIY9/ZDL6LT3HA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/button@3.10.1': - resolution: {integrity: sha512-1vkRsjdvJrJleK73u7ClrW4Fw3mtr2hIs8M2yLZUpLoqHXnIYJwmeEMtzwyPFYKBc5jaHcGXw45any7Puy1aFA==} + '@react-aria/button@3.11.0': + resolution: {integrity: sha512-b37eIV6IW11KmNIAm65F3SEl2/mgj5BrHIysW6smZX3KoKWTGYsYfcQkmtNgY0GOSFfDxMCoolsZ6mxC00nSDA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/calendar@3.5.13': - resolution: {integrity: sha512-BJV5IwIH4UPDa6/HRTOBcM1wC+/6p823VrbocV9mr+rt5cCnuh+cqcCQKqUSEbfaTMPrmabjBuEaQIvqjLRYUA==} + '@react-aria/calendar@3.6.0': + resolution: {integrity: sha512-tZ3nd5DP8uxckbj83Pt+4RqgcTWDlGi7njzc7QqFOG2ApfnYDUXbIpb/Q4KY6JNlJskG8q33wo0XfOwNy8J+eg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 
|| ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/checkbox@3.14.8': - resolution: {integrity: sha512-0qPJ3fiQQm7tiMHmIhR9iokr/MhhI2h6OWX/pDeIy/Gj63WSVk+Cka3NUhgMRGkguHKDZPKaFjK1oZQsXhCThQ==} + '@react-aria/checkbox@3.15.0': + resolution: {integrity: sha512-z/8xd4em7o0MroBXwkkwv7QRwiJaA1FwqMhRUb7iqtBGP2oSytBEDf0N7L09oci32a1P4ZPz2rMK5GlLh/PD6g==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/color@3.0.1': - resolution: {integrity: sha512-7hTCdXCU2/qpZuIrJcVr+s87C2MqHfi9Y461gMza5DjdUzlcy480UZ/iknbw82C0a+oVo08D/bnQctEjja05pw==} + '@react-aria/color@3.0.2': + resolution: {integrity: sha512-dSM5qQRcR1gRGYCBw0IGRmc29gjfoht3cQleKb8MMNcgHYa2oi5VdCs2yKXmYFwwVC6uPtnlNy9S6e0spqdr+w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/combobox@3.10.5': - resolution: {integrity: sha512-1cjBJXWYuR0de+9IEU1MOer3H5FSlbrdaqlWo+M6vvMymBL2OjjwXiG3LY1mR65ZwHoTswXzt6/mujUKaxk5vw==} + '@react-aria/combobox@3.11.0': + resolution: {integrity: sha512-s88YMmPkMO1WSoiH1KIyZDLJqUwvM2wHXXakj3cYw1tBHGo4rOUFq+JWQIbM5EDO4HOR4AUUqzIUd0NO7t3zyg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/datepicker@3.11.4': - resolution: {integrity: sha512-TXe1TB/pSwrIQ5BIDr6NCAYjBaKgLN6cP5DlAihywHzqxbM6vO8GU6qbrZNSBrtfzZnrR/4z66Vlw6rhznLnqQ==} + '@react-aria/datepicker@3.12.0': + resolution: {integrity: sha512-VYNXioLfddIHpwQx211+rTYuunDmI7VHWBRetCpH3loIsVFuhFSRchTQpclAzxolO3g0vO7pMVj9VYt7Swp6kg==} peerDependencies: - react: 
^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/dialog@3.5.19': - resolution: {integrity: sha512-I3AJWpAWCajj8Ama8qLQ18Tc37ODyk+Ym3haYEl5L4QnuFc0dU1sMJr15fppDGIxYjwvTTfctyhaSCz+S+wpkw==} + '@react-aria/dialog@3.5.20': + resolution: {integrity: sha512-l0GZVLgeOd3kL3Yj8xQW7wN3gn9WW3RLd/SGI9t7ciTq+I/FhftjXCWzXLlOCCTLMf+gv7eazecECtmoWUaZWQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/dnd@3.7.4': - resolution: {integrity: sha512-lRE8SVyK/MPbF6NiVXHoriOV0QulNKkSndyDr3TWPsLhH5GKQso5jSx8/5ogbDgRTzIsmIQldj/HlW238DCiSg==} + '@react-aria/disclosure@3.0.0': + resolution: {integrity: sha512-xO9QTQSvymujTjCs1iCQ4+dKZvtF/rVVaFZBKlUtqIqwTHMdqeZu4fh5miLEnTyVLNHMGzLrFggsd8Q+niC9Og==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/focus@3.18.4': - resolution: {integrity: sha512-91J35077w9UNaMK1cpMUEFRkNNz0uZjnSwiyBCFuRdaVuivO53wNC9XtWSDNDdcO5cGy87vfJRVAiyoCn/mjqA==} + '@react-aria/dnd@3.8.0': + resolution: {integrity: sha512-JiqHY3E9fDU5Kb4gN22cuK6QNlpMCGe6ngR/BV+Q8mLEsdoWcoUAYOtYXVNNTRvCdVbEWI87FUU+ThyPpoDhNQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/form@3.0.10': - resolution: {integrity: 
sha512-hWBrqEXxBxcpYTJv0telQKaiu2728EUFHta8/RGBqJ4+MhKKxI7+PnLoms78IuiK0MCYvukHfun1fuQvK+8jsg==} + '@react-aria/focus@3.19.0': + resolution: {integrity: sha512-hPF9EXoUQeQl1Y21/rbV2H4FdUR2v+4/I0/vB+8U3bT1CJ+1AFj1hc/rqx2DqEwDlEwOHN+E4+mRahQmlybq0A==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/grid@3.10.5': - resolution: {integrity: sha512-9sLa+rpLgRZk7VX+tvdSudn1tdVgolVzhDLGWd95yS4UtPVMihTMGBrRoByY57Wxvh1V+7Ptw8kc6tsRSotYKg==} + '@react-aria/form@3.0.11': + resolution: {integrity: sha512-oXzjTiwVuuWjZ8muU0hp3BrDH5qjVctLOF50mjPvqUbvXQTHhoDxWweyIXPQjGshaqBd2w4pWaE4A2rG2O/apw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/gridlist@3.9.5': - resolution: {integrity: sha512-LM+3D0amZZ1qiyqWVG52j0YRWt2chdpx+WG80ryDKwHLDIq7uz1+KXyIfv8cFt/cZcl6+9Ft3kWALCAi6O4NLA==} + '@react-aria/grid@3.11.0': + resolution: {integrity: sha512-lN5FpQgu2Rq0CzTPWmzRpq6QHcMmzsXYeClsgO3108uVp1/genBNAObYVTxGOKe/jb9q99trz8EtIn05O6KN1g==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/i18n@3.12.3': - resolution: {integrity: sha512-0Tp/4JwnCVNKDfuknPF+/xf3/woOc8gUjTU2nCjO3mCVb4FU7KFtjxQ2rrx+6hpIVG6g+N9qfMjRa/ggVH0CJg==} + '@react-aria/gridlist@3.10.0': + resolution: {integrity: sha512-UcblfSZ7kJBrjg9mQ5VbnRevN81UiYB4NuL5PwIpBpridO7tnl4ew6+96PYU7Wj1chHhPS3x0b0zmuSVN7A0LA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/interactions@3.22.4': - resolution: 
{integrity: sha512-E0vsgtpItmknq/MJELqYJwib+YN18Qag8nroqwjk1qOnBa9ROIkUhWJerLi1qs5diXq9LHKehZDXRlwPvdEFww==} + '@react-aria/i18n@3.12.4': + resolution: {integrity: sha512-j9+UL3q0Ls8MhXV9gtnKlyozq4aM95YywXqnmJtzT1rYeBx7w28hooqrWkCYLfqr4OIryv1KUnPiCSLwC2OC7w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/label@3.7.12': - resolution: {integrity: sha512-u9xT90lAlgb7xiv+p0md9QwCHz65XL7tjS5e29e88Rs3ptkv3aQubTqxVOUTEwzbNUT4A1QqTjUm1yfHewIRUw==} + '@react-aria/interactions@3.22.5': + resolution: {integrity: sha512-kMwiAD9E0TQp+XNnOs13yVJghiy8ET8L0cbkeuTgNI96sOAp/63EJ1FSrDf17iD8sdjt41LafwX/dKXW9nCcLQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/link@3.7.6': - resolution: {integrity: sha512-8buJznRWoOud8ApygUAz7TsshXNs6HDGB6YOYEJxy0WTKILn0U5NUymw2PWC14+bWRPelHMKmi6vbFBrJWzSzQ==} + '@react-aria/label@3.7.13': + resolution: {integrity: sha512-brSAXZVTey5RG/Ex6mTrV/9IhGSQFU4Al34qmjEDho+Z2qT4oPwf8k7TRXWWqzOU0ugYxekYbsLd2zlN3XvWcg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/listbox@3.13.5': - resolution: {integrity: sha512-tn32L/PIELIPYfDWCJ3OBRvvb/jCEvIzs6IYs8xCISV5W4853Je/WnA8wumWnz07U9sODYFmHUx2ThO7Z7dH7Q==} + '@react-aria/link@3.7.7': + resolution: {integrity: sha512-eVBRcHKhNSsATYWv5wRnZXRqPVcKAWWakyvfrYePIKpC3s4BaHZyTGYdefk8ZwZdEOuQZBqLMnjW80q1uhtkuA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/live-announcer@3.4.0': - resolution: {integrity: sha512-VBxEdMq2SbtRbNTQNcDR2G6E3lEl5cJSBiHTTO8Ln1AL76LiazrylIXGgoktqzCfRQmyq0v8CHk1cNKDU9mvJg==} + '@react-aria/listbox@3.13.6': + resolution: {integrity: 
sha512-6hEXEXIZVau9lgBZ4VVjFR3JnGU+fJaPmV3HP0UZ2ucUptfG0MZo24cn+ZQJsWiuaCfNFv5b8qribiv+BcO+Kg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-aria/live-announcer@3.4.1': + resolution: {integrity: sha512-4X2mcxgqLvvkqxv2l1n00jTzUxxe0kkLiapBGH1LHX/CxA1oQcHDqv8etJ2ZOwmS/MSBBiWnv3DwYHDOF6ubig==} - '@react-aria/menu@3.15.5': - resolution: {integrity: sha512-ygfS032hJSZCYYbMHnUSmUTVMaz99L9AUZ9kMa6g+k2X1t92K1gXfhYYkoClQD6+G0ch7zm0SwYFlUmRf9yOEA==} + '@react-aria/menu@3.16.0': + resolution: {integrity: sha512-TNk+Vd3TbpBPUxEloAdHRTaRxf9JBK7YmkHYiq0Yj5Lc22KS0E2eTyhpPM9xJvEWN2TlC5TEvNfdyui2kYWFFQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/meter@3.4.17': - resolution: {integrity: sha512-08wbQhfvVWzpWilhn/WD7cQ7TqafS/66umTk7+X6BW6TrS1//6loNNJV62IC3F7sskel4iEAtl2gW0WpW8zEdg==} + '@react-aria/meter@3.4.18': + resolution: {integrity: sha512-tTX3LLlmDIHqrC42dkdf+upb1c4UbhlpZ52gqB64lZD4OD4HE+vMTwNSe+7MRKMLvcdKPWCRC35PnxIHZ15kfQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/numberfield@3.11.8': - resolution: {integrity: sha512-CWRHbrjfpvEqBmtjwX8LjVds6+tMNneRlKF46ked5sZilfU2jIirufaucM36N4vX6N/W7nFR/rCbp2WCOU9p3Q==} + '@react-aria/numberfield@3.11.9': + resolution: {integrity: sha512-3tiGPx2y4zyOV7PmdBASes99ZZsFTZAJTnU45Z+p1CW4131lw7y2ZhbojBl7U6DaXAJvi1z6zY6cq2UE9w5a0Q==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - 
'@react-aria/overlays@3.23.4': - resolution: {integrity: sha512-MZUW6SUlTWOwKuFTqUTxW5BnvdW3Y9cEwanWuz98NX3ST7JYe/3ZcZhb37/fGW4uoGHnQ9icEwVf0rbMrK2STg==} + '@react-aria/overlays@3.24.0': + resolution: {integrity: sha512-0kAXBsMNTc/a3M07tK9Cdt/ea8CxTAEJ223g8YgqImlmoBBYAL7dl5G01IOj67TM64uWPTmZrOklBchHWgEm3A==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/progress@3.4.17': - resolution: {integrity: sha512-5+01WNibLoNS5KcfU5p6vg7Lhz17plqqzv/uITx28zzj3saaj0VLR7n57Ig2fXe8ZEQoUS89BS3sIEsIf96S1A==} + '@react-aria/progress@3.4.18': + resolution: {integrity: sha512-FOLgJ9t9i1u3oAAimybJG6r7/soNPBnJfWo4Yr6MmaUv90qVGa1h6kiuM5m9H/bm5JobAebhdfHit9lFlgsCmg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/radio@3.10.9': - resolution: {integrity: sha512-XnU7zGTEku1mPvJweX4I3ifwEBtglEWYoO4CZGvA3eXj39X8iGwNZXUst1pdk2ykWUKbtwrmsWA6zG2OAGODYw==} + '@react-aria/radio@3.10.10': + resolution: {integrity: sha512-NVdeOVrsrHgSfwL2jWCCXFsWZb+RMRZErj5vthHQW4nkHECGOzeX56VaLWTSvdoCPqi9wdIX8A6K9peeAIgxzA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/searchfield@3.7.10': - resolution: {integrity: sha512-1XTYh2dycedaK1tgpHAHcu8PTK1wG3dv53yLziu07JsBe9tX6O8jIFBhZK8SpfNnP8pEOI3PIlVEjaarLwgWzQ==} + '@react-aria/searchfield@3.7.11': + resolution: {integrity: sha512-wFf6QxtBFfoxy0ANxI0+ftFEBGynVCY0+ce4H4Y9LpUTQsIKMp3sdc7LoUFORWw5Yee6Eid5cFPQX0Ymnk+ZJg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/select@3.14.11': - resolution: {integrity: 
sha512-rX5U4JcPNV41lNEF1tAxNxqrGENnLGZL/D5Y+YNpqKSU5U09+hD3ovsflNkF/d+deb25zg45JRxumwOCQ+rfyw==} + '@react-aria/select@3.15.0': + resolution: {integrity: sha512-zgBOUNy81aJplfc3NKDJMv8HkXjBGzaFF3XDzNfW8vJ7nD9rcTRUN5SQ1XCEnKMv12B/Euk9zt6kd+tX0wk1vQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/selection@3.20.1': - resolution: {integrity: sha512-My0w8UC/7PAkz/1yZUjr2VRuzDZz1RrbgTqP36j5hsJx8RczDTjI4TmKtQNKG0ggaP4w83G2Og5JPTq3w3LMAw==} + '@react-aria/selection@3.21.0': + resolution: {integrity: sha512-52JJ6hlPcM+gt0VV3DBmz6Kj1YAJr13TfutrKfGWcK36LvNCBm1j0N+TDqbdnlp8Nue6w0+5FIwZq44XPYiBGg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/separator@3.4.3': - resolution: {integrity: sha512-L+eCmSGfRJ9jScHZqBkmOkp44LBARisDjRdYbGrLlsAEcOiHUXufnfpxz2rgkUGBdUgnI9hIk12q5kdy0UxGjg==} + '@react-aria/separator@3.4.4': + resolution: {integrity: sha512-dH+qt0Mdh0nhKXCHW6AR4DF8DKLUBP26QYWaoThPdBwIpypH/JVKowpPtWms1P4b36U6XzHXHnTTEn/ZVoCqNA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/slider@3.7.13': - resolution: {integrity: sha512-yGlIpoOUKUoP0M3iI8ZHU001NASBOeZJSIQNfoS7HiqSR3bz+6BX7DRAM6B+CPHJleUtrdQ6JjO/8V8ZUV2kNQ==} + '@react-aria/slider@3.7.14': + resolution: {integrity: sha512-7rOiKjLkEZ0j7mPMlwrqivc+K4OSfL14slaQp06GHRiJkhiWXh2/drPe15hgNq55HmBQBpA0umKMkJcqVgmXPA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/spinbutton@3.6.9': - 
resolution: {integrity: sha512-m+uVJdiIc2LrLVDGjU7p8P2O2gUvTN26GR+NgH4rl+tUSuAB0+T1rjls/C+oXEqQjCpQihEB9Bt4M+VHpzmyjA==} + '@react-aria/spinbutton@3.6.10': + resolution: {integrity: sha512-nhYEYk7xUNOZDaqiQ5w/nHH9ouqjJbabTWXH+KK7UR1oVGfo4z1wG94l8KWF3Z6SGGnBxzLJyTBguZ4g9aYTSg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/ssr@3.9.6': - resolution: {integrity: sha512-iLo82l82ilMiVGy342SELjshuWottlb5+VefO3jOQqQRNYnJBFpUSadswDPbRimSgJUZuFwIEYs6AabkP038fA==} + '@react-aria/ssr@3.9.7': + resolution: {integrity: sha512-GQygZaGlmYjmYM+tiNBA5C6acmiDWF52Nqd40bBp0Znk4M4hP+LTmI0lpI1BuKMw45T8RIhrAsICIfKwZvi2Gg==} engines: {node: '>= 12'} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/switch@3.6.9': - resolution: {integrity: sha512-w7xIywpR6llm22DXYOObZ2Uqvsw+gNmxdJ86h8+YRtpSkFnPMhXtTMv3RXpEGYhPTt/YDIqfxiluF1E2IHGwIA==} + '@react-aria/switch@3.6.10': + resolution: {integrity: sha512-FtaI9WaEP1tAmra1sYlAkYXg9x75P5UtgY8pSbe9+1WRyWbuE1QZT+RNCTi3IU4fZ7iJQmXH6+VaMyzPlSUagw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/table@3.15.5': - resolution: {integrity: sha512-bdNZF0ZoNOfyOEIK/ctv0llacaCNk8mv+GGy8mwh5bZeJjd8KuDIpYQtZJYvf2YVvPYRWyXRhF0/B229m65f/g==} + '@react-aria/table@3.16.0': + resolution: {integrity: sha512-9xF9S3CJ7XRiiK92hsIKxPedD0kgcQWwqTMtj3IBynpQ4vsnRiW3YNIzrn9C3apjknRZDTSta8O2QPYCUMmw2A==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - 
'@react-aria/tabs@3.9.7': - resolution: {integrity: sha512-f78P2Y9ZCYtwOnteku9mPVIk21xSSREYWaQPtA9ebSgVbeR5ya6RpaX9ISc9cd0HEF3Av+hZYyS1pNXXWymv9g==} + '@react-aria/tabs@3.9.8': + resolution: {integrity: sha512-Nur/qRFBe+Zrt4xcCJV/ULXCS3Mlae+B89bp1Gl20vSDqk6uaPtGk+cS5k03eugOvas7AQapqNJsJgKd66TChw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/tag@3.4.7': - resolution: {integrity: sha512-hreVvphUeYUfMN6gjM3+WouN2P/WGuR0rGpOrFk2HEnGDPg3Ar0isfdAaciTSBOc26CDKNgrmzRguxCmKKuqgw==} + '@react-aria/tag@3.4.8': + resolution: {integrity: sha512-exWl52bsFtJuzaqMYvSnLteUoPqb3Wf+uICru/yRtREJsWVqjJF38NCVlU73Yqd9qMPTctDrboSZFAWAWKDxoA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/textfield@3.14.10': - resolution: {integrity: sha512-vG44FgxwfJUF2S6tRG+Sg646DDEgs0CO9RYniafEOHz8rwcNIH3lML7n8LAfzQa+BjBY28+UF0wmqEvd6VCzCQ==} + '@react-aria/textfield@3.15.0': + resolution: {integrity: sha512-V5mg7y1OR6WXYHdhhm4FC7QyGc9TideVRDFij1SdOJrIo5IFB7lvwpOS0GmgwkVbtr71PTRMjZnNbrJUFU6VNA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/toggle@3.10.9': - resolution: {integrity: sha512-dtfnyIU2/kcH9rFAiB48diSmaXDv45K7UCuTkMQLjbQa3QHC1oYNbleVN/VdGyAMBsIWtfl8L4uuPrAQmDV/bg==} + '@react-aria/toggle@3.10.10': + resolution: {integrity: sha512-QwMT/vTNrbrILxWVHfd9zVQ3mV2NdBwyRu+DphVQiFAXcmc808LEaIX2n0lI6FCsUDC9ZejCyvzd91/YemdZ1Q==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || 
^19.0.0-rc.1 - '@react-aria/tooltip@3.7.9': - resolution: {integrity: sha512-TqVJ7YqaP/enxNyA1QGr43w4nBZmOs6Hb/pROMS5afbX7gHgMVFn0lTRc6DC2cvcfgYc4WICs2QiQMniZt/E7A==} + '@react-aria/toolbar@3.0.0-beta.11': + resolution: {integrity: sha512-LM3jTRFNDgoEpoL568WaiuqiVM7eynSQLJis1hV0vlVnhTd7M7kzt7zoOjzxVb5Uapz02uCp1Fsm4wQMz09qwQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/utils@3.25.3': - resolution: {integrity: sha512-PR5H/2vaD8fSq0H/UB9inNbc8KDcVmW6fYAfSWkkn+OAdhTTMVKqXXrZuZBWyFfSD5Ze7VN6acr4hrOQm2bmrA==} + '@react-aria/tooltip@3.7.10': + resolution: {integrity: sha512-Udi3XOnrF/SYIz72jw9bgB74MG/yCOzF5pozHj2FH2HiJlchYv/b6rHByV/77IZemdlkmL/uugrv/7raPLSlnw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-aria/visually-hidden@3.8.17': - resolution: {integrity: sha512-WFgny1q2CbxxU6gu46TGQXf1DjsnuSk+RBDP4M7bm1mUVZzoCp7U7AtjNmsBrWg0NejxUdgD7+7jkHHCQ91qRA==} + '@react-aria/utils@3.26.0': + resolution: {integrity: sha512-LkZouGSjjQ0rEqo4XJosS4L3YC/zzQkfRM3KoqK6fUOmUJ9t0jQ09WjiF+uOoG9u+p30AVg3TrZRUWmoTS+koQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/calendar@3.5.5': - resolution: {integrity: sha512-HzaiDRhrmaYIly8hRsjjIrydLkldiw1Ws6T/130NLQOt+VPwRW/x0R+nil42mA9LZ6oV0XN0NpmG5tn7TaKRGw==} + '@react-aria/visually-hidden@3.8.18': + resolution: {integrity: sha512-l/0igp+uub/salP35SsNWq5mGmg3G5F5QMS1gDZ8p28n7CgjvzyiGhJbbca7Oxvaw1HRFzVl9ev+89I7moNnFQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/checkbox@3.6.9': - resolution: {integrity: sha512-JrY3ecnK/SSJPxw+qhGhg3YV4e0CpUcPDrVwY3mSiAE932DPd19xr+qVCknJ34H7JYYt/q0l2z0lmgPnl96RTg==} + '@react-stately/calendar@3.6.0': 
+ resolution: {integrity: sha512-GqUtOtGnwWjtNrJud8nY/ywI4VBP5byToNVRTnxbMl+gYO1Qe/uc5NG7zjwMxhb2kqSBHZFdkF0DXVqG2Ul+BA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/collections@3.11.0': - resolution: {integrity: sha512-TiJeJjHMPSbbeAhmCXLJNSCk0fa5XnCvEuYw6HtQzDnYiq1AD7KAwkpjC5NfKkjqF3FLXs/v9RDm/P69q6rYzw==} + '@react-stately/checkbox@3.6.10': + resolution: {integrity: sha512-LHm7i4YI8A/RdgWAuADrnSAYIaYYpQeZqsp1a03Og0pJHAlZL0ymN3y2IFwbZueY0rnfM+yF+kWNXjJqbKrFEQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/color@3.8.0': - resolution: {integrity: sha512-lBH91HEStZeayhE/FkDMt9WC0UISQiAn8DoD2hfpTGeeWscX/soyxZA7oVL7zBOG9RfDBMNzF+CybVROrWSKAQ==} + '@react-stately/collections@3.12.0': + resolution: {integrity: sha512-MfR9hwCxe5oXv4qrLUnjidwM50U35EFmInUeFf8i9mskYwWlRYS0O1/9PZ0oF1M0cKambaRHKEy98jczgb9ycA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/combobox@3.10.0': - resolution: {integrity: sha512-4W4HCCjjoddW/LZM3pSSeLoV7ncYXlaICKmqlBcbtLR5jY4U5Kx+pPpy3oJ1vCdjDHatIxZ0tVKEBP7vBQVeGQ==} + '@react-stately/color@3.8.1': + resolution: {integrity: sha512-7eN7K+KJRu+rxK351eGrzoq2cG+yipr90i5b1cUu4lioYmcH4WdsfjmM5Ku6gypbafH+kTDfflvO6hiY1NZH+A==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/datepicker@3.10.3': - resolution: {integrity: sha512-6PJW1QMwk6BQMktV9L6DA4f2rfAdLfbq3iTNLy4qxd5IfNPLMUZiJGGTj+cuqx0WcEl+q5irp+YhKBpbmhPZHg==} + '@react-stately/combobox@3.10.1': + resolution: {integrity: sha512-Rso+H+ZEDGFAhpKWbnRxRR/r7YNmYVtt+Rn0eNDNIUp3bYaxIBCdCySyAtALs4I8RZXZQ9zoUznP7YeVwG3cLg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 
+ react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/dnd@3.4.3': - resolution: {integrity: sha512-sUvhmMxFEw6P2MW7walx0ntakIihxdPxA06K9YZ3+ReaUvzQuRw5cFDaTTHrlegWRMYD0CyQaKlGIaTQihhvVA==} + '@react-stately/datepicker@3.11.0': + resolution: {integrity: sha512-d9MJF34A0VrhL5y5S8mAISA8uwfNCQKmR2k4KoQJm3De1J8SQeNzSjLviAwh1faDow6FXGlA6tVbTrHyDcBgBg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/flags@3.0.4': - resolution: {integrity: sha512-RNJEkOALwKg+JeYsfNlfPc4GXm7hiBLX0yuHOkRapWEyDOfi0cinkV/TZG4goOZdQ5tBpHmemf2qqiHAxqHlzQ==} + '@react-stately/disclosure@3.0.0': + resolution: {integrity: sha512-Z9+fi0/41ZXHjGopORQza7mk4lFEFslKhy65ehEo6O6j2GuIV0659ExIVDsmJoJSFjXCfGh0sX8oTSOlXi9gqg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/form@3.0.6': - resolution: {integrity: sha512-KMsxm3/V0iCv/6ikt4JEjVM3LW2AgCzo7aNotMzRobtwIo0RwaUo7DQNY00rGgFQ3/IjzI6DcVo13D+AVE/zXg==} + '@react-stately/dnd@3.5.0': + resolution: {integrity: sha512-ZcWFw1npEDnATiy3TEdzA1skQ3UEIyfbNA6VhPNO8yiSVLxoxBOaEaq8VVS72fRGAtxud6dgOy8BnsP9JwDClQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/grid@3.9.3': - resolution: {integrity: sha512-P5KgCNYwm/n8bbLx6527li89RQWoESikrsg2MMyUpUd6IJ321t2pGONGRRQzxE0SBMolPRDJKV0Do2OlsjYKhQ==} + '@react-stately/flags@3.0.5': + resolution: {integrity: sha512-6wks4csxUwPCp23LgJSnkBRhrWpd9jGd64DjcCTNB2AHIFu7Ab1W59pJpUL6TW7uAxVxdNKjgn6D1hlBy8qWsA==} + + '@react-stately/form@3.1.0': + resolution: {integrity: sha512-E2wxNQ0QaTyDHD0nJFtTSnEH9A3bpJurwxhS4vgcUmESHgjFEMLlC9irUSZKgvOgb42GAq+fHoWBsgKeTp9Big==} + peerDependencies: + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + + '@react-stately/grid@3.10.0': + resolution: {integrity: 
sha512-ii+DdsOBvCnHMgL0JvUfFwO1kiAPP19Bpdpl6zn/oOltk6F5TmnoyNrzyz+2///1hCiySI3FE1O7ujsAQs7a6Q==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/list@3.11.0': - resolution: {integrity: sha512-O+BxXcbtoLZWn4QIT54RoFUaM+QaJQm6s0ZBJ3Jv4ILIhukVOc55ra+aWMVlXFQSpbf6I3hyVP6cz1yyvd5Rtw==} + '@react-stately/list@3.11.1': + resolution: {integrity: sha512-UCOpIvqBOjwLtk7zVTYWuKU1m1Oe61Q5lNar/GwHaV1nAiSQ8/yYlhr40NkBEs9X3plEfsV28UIpzOrYnu1tPg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/menu@3.8.3': - resolution: {integrity: sha512-sV63V+cMgzipx/N7dq5GaXoItfXIfFEpCtlk3PM2vKstlCJalszXrdo+x996bkeU96h0plB7znAlhlXOeTKzUg==} + '@react-stately/menu@3.9.0': + resolution: {integrity: sha512-++sm0fzZeUs9GvtRbj5RwrP+KL9KPANp9f4SvtI3s+MP+Y/X3X7LNNePeeccGeyikB5fzMsuyvd82bRRW9IhDQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/numberfield@3.9.7': - resolution: {integrity: sha512-PjSgCCpYasGCEAznFQNqa2JhhEQ5+/2eMiV7ZI5j76q3edTNF8G5OOCl2RazDbzFp6vDAnRVT7Kctx5Tl5R/Zw==} + '@react-stately/numberfield@3.9.8': + resolution: {integrity: sha512-J6qGILxDNEtu7yvd3/y+FpbrxEaAeIODwlrFo6z1kvuDlLAm/KszXAc75yoDi0OtakFTCMP6/HR5VnHaQdMJ3w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/overlays@3.6.11': - resolution: {integrity: sha512-usuxitwOx4FbmOW7Og4VM8R8ZjerbHZLLbFaxZW7pWLs7Ypway1YhJ3SWcyNTYK7NEk4o602kSoU6MSev1Vgag==} + '@react-stately/overlays@3.6.12': + resolution: {integrity: sha512-QinvZhwZgj8obUyPIcyURSCjTZlqZYRRCS60TF8jH8ZpT0tEAuDb3wvhhSXuYA3Xo9EHLwvLjEf3tQKKdAQArw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || 
^18.0.0 || ^19.0.0-rc.1 - '@react-stately/radio@3.10.8': - resolution: {integrity: sha512-VRq6Gzsbk3jzX6hdrSoDoSra9vLRsOi2pLkvW/CMrJ0GSgMwr8jjvJKnNFvYJ3eYQb20EwkarsOAfk7vPSIt/Q==} + '@react-stately/radio@3.10.9': + resolution: {integrity: sha512-kUQ7VdqFke8SDRCatw2jW3rgzMWbvw+n2imN2THETynI47NmNLzNP11dlGO2OllRtTrsLhmBNlYHa3W62pFpAw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/searchfield@3.5.7': - resolution: {integrity: sha512-VxEG4tWDypdXQ8f7clZBu5Qmc4osqDBeA/gNMA2i1j/h2zRVcCJ0fRCHuDeXLSWBqF1XXAI4TWV53fBBwJusbg==} + '@react-stately/searchfield@3.5.8': + resolution: {integrity: sha512-jtquvGadx1DmtQqPKaVO6Qg/xpBjNxsOd59ciig9xRxpxV+90i996EX1E2R6R+tGJdSM1pD++7PVOO4yE++HOg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/select@3.6.8': - resolution: {integrity: sha512-fLAVzGeYSdYdBdrEVws6Pb1ywFPdapA0eWphoW5s3fS0/pKcVWwbCHeHlaBEi1ISyqEubQZFGQdeFKm/M46Hew==} + '@react-stately/select@3.6.9': + resolution: {integrity: sha512-vASUDv7FhEYQURzM+JIwcusPv7/x/l3zHc/oKJPvoCl3aa9pwS8hZwS82SC00o2iFnrDscfDJju4IE/cd4hucg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/selection@3.17.0': - resolution: {integrity: sha512-It3LRTaFOavybuDBvBH2mvCh73OL4awqvN4tZ0JzLzMtaYSBe9+YmFasYrzB0o7ca17B2q1tpUmsNWaAgIqbLA==} + '@react-stately/selection@3.18.0': + resolution: {integrity: sha512-6EaNNP3exxBhW2LkcRR4a3pg+3oDguZlBSqIVVR7lyahv/D8xXHRC4dX+m0mgGHJpsgjs7664Xx6c8v193TFxg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/slider@3.5.8': - resolution: {integrity: sha512-EDgbrxMq1w3+XTN72MGl3YtAG/j65EYX1Uc3Fh56K00+inJbTdRWyYTrb3NA310fXCd0WFBbzExuH2ohlKQycg==} + 
'@react-stately/slider@3.6.0': + resolution: {integrity: sha512-w5vJxVh267pmD1X+Ppd9S3ZzV1hcg0cV8q5P4Egr160b9WMcWlUspZPtsthwUlN7qQe/C8y5IAhtde4s29eNag==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/table@3.12.3': - resolution: {integrity: sha512-8uGrLcNJYeMbFtzRQZFWCBj5kV+7v3jzwoKIL1j9TmYUKow1PTDMQbPJpAZLQhnC2wVMlaFVgDbedSlbBij7Zg==} + '@react-stately/table@3.13.0': + resolution: {integrity: sha512-mRbNYrwQIE7xzVs09Lk3kPteEVFVyOc20vA8ph6EP54PiUf/RllJpxZe/WUYLf4eom9lUkRYej5sffuUBpxjCA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/tabs@3.6.10': - resolution: {integrity: sha512-F7wfoiNsrBy7c02AYHyE1USGgj05HQ0hp7uXmQjp2LEa+AA0NKKi3HdswTHHySxb0ZRuoEE7E7vp/gXQYx2/Ow==} + '@react-stately/tabs@3.7.0': + resolution: {integrity: sha512-ox4hTkfZCoR4Oyr3Op3rBlWNq2Wxie04vhEYpTZQ2hobR3l4fYaOkd7CPClILktJ3TC104j8wcb0knWxIBRx9w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/toggle@3.7.8': - resolution: {integrity: sha512-ySOtkByvIY54yIu8IZ4lnvomQA0H+/mkZnd6T5fKN3tjvIzHmkUk3TAPmNInUxHX148tSW6mWwec0xvjYqEd6w==} + '@react-stately/toggle@3.8.0': + resolution: {integrity: sha512-pyt/k/J8BwE/2g6LL6Z6sMSWRx9HEJB83Sm/MtovXnI66sxJ2EfQ1OaXB7Su5PEL9OMdoQF6Mb+N1RcW3zAoPw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/tooltip@3.4.13': - resolution: {integrity: sha512-zQ+8FQ7Pi0Cz852dltXb6yaryjE18K3byK4tIO3e5vnrZHEGvfdxowc+v9ak5UV93kVrYoOVmfZHRcEaTXTBNA==} + '@react-stately/tooltip@3.5.0': + resolution: {integrity: sha512-+xzPNztJDd2XJD0X3DgWKlrgOhMqZpSzsIssXeJgO7uCnP8/Z513ESaipJhJCFC8fxj5caO/DK4Uu8hEtlB8cQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || 
^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/tree@3.8.5': - resolution: {integrity: sha512-0/tYhsKWQQJTOZFDwh8hY3Qk6ejNFRldGrLeK5kS22UZdvsMFyh7WAi40FTCJy561/VoB0WqQI4oyNPOa9lYWg==} + '@react-stately/tree@3.8.6': + resolution: {integrity: sha512-lblUaxf1uAuIz5jm6PYtcJ+rXNNVkqyFWTIMx6g6gW/mYvm8GNx1G/0MLZE7E6CuDGaO9dkLSY2bB1uqyKHidA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-stately/utils@3.10.4': - resolution: {integrity: sha512-gBEQEIMRh5f60KCm7QKQ2WfvhB2gLUr9b72sqUdIZ2EG+xuPgaIlCBeSicvjmjBvYZwOjoOEnmIkcx2GHp/HWw==} + '@react-stately/utils@3.10.5': + resolution: {integrity: sha512-iMQSGcpaecghDIh3mZEpZfoFH3ExBwTtuBEcvZ2XnGzCgQjeYXcMdIUwAfVQLXFTdHUHGF6Gu6/dFrYsCzySBQ==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/breadcrumbs@3.7.8': - resolution: {integrity: sha512-+BW2a+PrY8ArZ+pKecz13oJFrUAhthvXx17o3x0BhWUhRpAdtmTYt2hjw8zNanm2j0Kvgo1HYKgvtskCRxYcOA==} + '@react-types/breadcrumbs@3.7.9': + resolution: {integrity: sha512-eARYJo8J+VfNV8vP4uw3L2Qliba9wLV2bx9YQCYf5Lc/OE5B/y4gaTLz+Y2P3Rtn6gBPLXY447zCs5i7gf+ICg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/button@3.10.0': - resolution: {integrity: sha512-rAyU+N9VaHLBdZop4zasn8IDwf9I5Q1EzHUKMtzIFf5aUlMUW+K460zI/l8UESWRSWAXK9/WPSXGxfcoCEjvAA==} + '@react-types/button@3.10.1': + resolution: {integrity: sha512-XTtap8o04+4QjPNAshFWOOAusUTxQlBjU2ai0BTVLShQEjHhRVDBIWsI2B2FKJ4KXT6AZ25llaxhNrreWGonmA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/calendar@3.4.10': - resolution: {integrity: 
sha512-PyjqxwJxSW2IpQx6y0D9O34fRCWn1gv9q0qFhgaIigIQrPg8zTE/CC7owHLxAtgCnnCt8exJ5rqi414csaHKlA==} + '@react-types/calendar@3.5.0': + resolution: {integrity: sha512-O3IRE7AGwAWYnvJIJ80cOy7WwoJ0m8GtX/qSmvXQAjC4qx00n+b5aFNBYAQtcyc3RM5QpW6obs9BfwGetFiI8w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/checkbox@3.8.4': - resolution: {integrity: sha512-fvZrlQmlFNsYHZpl7GVmyYQlKdUtO5MczMSf8z3TlSiCb5Kl3ha9PsZgLhJqGuVnzB2ArIBz0eZrYa3k0PhcpA==} + '@react-types/checkbox@3.9.0': + resolution: {integrity: sha512-9hbHx0Oo2Hp5a8nV8Q75LQR0DHtvOIJbFaeqESSopqmV9EZoYjtY/h0NS7cZetgahQgnqYWQi44XGooMDCsmxA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/color@3.0.0': - resolution: {integrity: sha512-VUH8CROAM69GsMBilrJ1xyAdVsWL01nXQYrkZJxAEApv1OrcpIGSdsXLcGrjsrhjjiNVXxWFnqYRMsKkLzIl7g==} + '@react-types/color@3.0.1': + resolution: {integrity: sha512-KemFziO3GbmT3HEKrgOGdqNA6Gsmy9xrwFO3f8qXSG7gVz6M27Ic4R9HVQv4iAjap5uti6W13/pk2bc/jLVcEA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/combobox@3.13.0': - resolution: {integrity: sha512-kH/a+Fjpr54M2JbHg9RXwMjZ9O+XVsdOuE5JCpWRibJP1Mfl1md8gY6y6zstmVY8COrSqFvMZWB+PzwaTWjTGw==} + '@react-types/combobox@3.13.1': + resolution: {integrity: sha512-7xr+HknfhReN4QPqKff5tbKTe2kGZvH+DGzPYskAtb51FAAiZsKo+WvnNAvLwg3kRoC9Rkn4TAiVBp/HgymRDw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/datepicker@3.8.3': - resolution: {integrity: sha512-Y4qfPRBB6uzocosCOWSYMuwiZ3YXwLWQYiFB4KCglkvHyltbNz76LgoBEnclYA5HjwosIk4XywiXvHSYry8JnQ==} + '@react-types/datepicker@3.9.0': + resolution: {integrity: 
sha512-dbKL5Qsm2MQwOTtVQdOcKrrphcXAqDD80WLlSQrBLg+waDuuQ7H+TrvOT0thLKloNBlFUGnZZfXGRHINpih/0g==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/dialog@3.5.13': - resolution: {integrity: sha512-9k8daVcAqQsySkzDY6NIVlyGxtpEip4TKuLyzAehthbv78GQardD5fHdjQ6eXPRS4I2qZrmytrFFrlOnwWVGHw==} + '@react-types/dialog@3.5.14': + resolution: {integrity: sha512-OXWMjrALwrlgw8aHD8SeRm/s3tbAssdaEh2h73KUSeFau3fU3n5mfKv+WnFqsEaOtN261o48l7hTlS6615H9AA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/grid@3.2.9': - resolution: {integrity: sha512-eMw0d2UIZ4QTzGgD1wGGPw0cv67KjAOCp4TcwWjgDV7Wa5SVV/UvOmpnIVDyfhkG/4KRI5OR9h+isy76B726qA==} + '@react-types/grid@3.2.10': + resolution: {integrity: sha512-Z5cG0ITwqjUE4kWyU5/7VqiPl4wqMJ7kG/ZP7poAnLmwRsR8Ai0ceVn+qzp5nTA19cgURi8t3LsXn3Ar1FBoog==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/link@3.5.8': - resolution: {integrity: sha512-l/YGXddgAbLnIT7ekftXrK1D4n8NlLQwx0d4usyZpaxP1KwPzuwng20DxynamLc1atoKBqbUtZAnz32pe7vYgw==} + '@react-types/link@3.5.9': + resolution: {integrity: sha512-JcKDiDMqrq/5Vpn+BdWQEuXit4KN4HR/EgIi3yKnNbYkLzxBoeQZpQgvTaC7NEQeZnSqkyXQo3/vMUeX/ZNIKw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/listbox@3.5.2': - resolution: {integrity: sha512-ML/Bt/MeO0FiixcuFQ+smpu1WguxTOqHDjSnhc1vcNxVQFWQOhyVy01LAY2J/T9TjfjyYGD41vyMTI0f6fcLEQ==} + '@react-types/listbox@3.5.3': + resolution: {integrity: sha512-v1QXd9/XU3CCKr2Vgs7WLcTr6VMBur7CrxHhWZQQFExsf9bgJ/3wbUdjy4aThY/GsYHiaS38EKucCZFr1QAfqA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - 
'@react-types/menu@3.9.12': - resolution: {integrity: sha512-1SPnkHKJdvOfwv9fEgK1DI6DYRs4D3hW2XcWlLhVXSjaC68CzOHGwFhKIKvZiDTW/11L770PRSEloIxHR09uFQ==} + '@react-types/menu@3.9.13': + resolution: {integrity: sha512-7SuX6E2tDsqQ+HQdSvIda1ji/+ujmR86dtS9CUu5yWX91P25ufRjZ72EvLRqClWNQsj1Xl4+2zBDLWlceznAjw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/meter@3.4.4': - resolution: {integrity: sha512-0SEmPkShByC1gYkW7l+iJPg8QfEe2VrgwTciAtTfC4KIqAYmJVQtq6L+4d72EMxOh8RpQHePaY/RFHEJXAh72A==} + '@react-types/meter@3.4.5': + resolution: {integrity: sha512-04w1lEtvP/c3Ep8ND8hhH2rwjz2MtQ8o8SNLhahen3u0rX3jKOgD4BvHujsyvXXTMjj1Djp74sGzNawb4Ppi9w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/numberfield@3.8.6': - resolution: {integrity: sha512-VtWEMAXUO1S9EEZI8whc7xv6DVccxhbWsRthMCg/LxiwU3U5KAveadNc2c5rtXkRpd3cnD5xFzz3dExXdmHkAg==} + '@react-types/numberfield@3.8.7': + resolution: {integrity: sha512-KccMPi39cLoVkB2T0V7HW6nsxQVAwt89WWCltPZJVGzsebv/k0xTQlPVAgrUake4kDLoE687e3Fr/Oe3+1bDhw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/overlays@3.8.10': - resolution: {integrity: sha512-IcnB+VYfAJazRjWhBKZTmVMh3KTp/B1rRbcKkPx6t8djP9UQhKcohP7lAALxjJ56Jjz/GFC6rWyUcnYH0NFVRA==} + '@react-types/overlays@3.8.11': + resolution: {integrity: sha512-aw7T0rwVI3EuyG5AOaEIk8j7dZJQ9m34XAztXJVZ/W2+4pDDkLDbJ/EAPnuo2xGYRGhowuNDn4tDju01eHYi+w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/progress@3.5.7': - resolution: {integrity: sha512-EqMDHmlpoZUZzTjdejGIkSM0pS2LBI9NdadHf3bDNTycHv+5L1xpMHUg8RGOW8a3sRVLRvfN1aO9l75QZkyj+w==} + '@react-types/progress@3.5.8': + resolution: {integrity: 
sha512-PR0rN5mWevfblR/zs30NdZr+82Gka/ba7UHmYOW9/lkKlWeD7PHgl1iacpd/3zl/jUF22evAQbBHmk1mS6Mpqw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/radio@3.8.4': - resolution: {integrity: sha512-GCuOwQL19iwKa74NAIk9hv4ivyI8oW1+ZCuc2fzyDdeQjzTIlv3qrIyShwpVy1IoI7/4DYTMZm/YXPoKhu5TTA==} + '@react-types/radio@3.8.5': + resolution: {integrity: sha512-gSImTPid6rsbJmwCkTliBIU/npYgJHOFaI3PNJo7Y0QTAnFelCtYeFtBiWrFodSArSv7ASqpLLUEj9hZu/rxIg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/searchfield@3.5.9': - resolution: {integrity: sha512-c/x8BWpH1Zq+fWpeBtzw2AhQhGi7ahWPicV7PlnqwIGO0MrH/QCjX0dj+I+1xpcAh8Eq6ECa79HE74Rw6aJmFg==} + '@react-types/searchfield@3.5.10': + resolution: {integrity: sha512-7wW4pJzbReawoGPu8a4l+CODTCDN088EN/ysUzl622ewim57PjArjix+lpO4+aEtJqS9HKpq8UEbjwo9axpcUA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/select@3.9.7': - resolution: {integrity: sha512-Jva4ixfB4EEdy+WmZkUoLiQI7vVfHPxM73VuL7XDxvAO+YKiIztDTcU720QVNhxTMmQvCxfRBXWar8aodCjLiw==} + '@react-types/select@3.9.8': + resolution: {integrity: sha512-RGsYj2oFjXpLnfcvWMBQnkcDuKkwT43xwYWZGI214/gp/B64tJiIUgTM5wFTRAeGDX23EePkhCQF+9ctnqFd6g==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/shared@3.25.0': - resolution: {integrity: sha512-OZSyhzU6vTdW3eV/mz5i6hQwQUhkRs7xwY2d1aqPvTdMe0+2cY7Fwp45PAiwYLEj73i9ro2FxF9qC4DvHGSCgQ==} + '@react-types/shared@3.26.0': + resolution: {integrity: sha512-6FuPqvhmjjlpEDLTiYx29IJCbCNWPlsyO+ZUmCUXzhUv2ttShOXfw8CmeHWHftT/b2KweAWuzqSlfeXPR76jpw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || 
^19.0.0-rc.1 - '@react-types/slider@3.7.6': - resolution: {integrity: sha512-z72wnEzSge6qTD9TUoUPp1A4j4jXk/MVii6rGE78XeE/Pq7HyyjU5bCagryMr9PC9MKa/oTiHcshKqWBDf57GA==} + '@react-types/slider@3.7.7': + resolution: {integrity: sha512-lYTR9zXQV2fSEm/G3gwDENWiki1IXd/oorsgf0zu1DBi2SQDbOsLsGUXiwvD24Xy6OkUuhAqjLPPexezo7+u9g==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/switch@3.5.6': - resolution: {integrity: sha512-gJ8t2yTCgcitz4ON4ELcLLmtlDkn2MUjjfu3ez/cwA1X/NUluPYkhXj5Z6H+KOlnveqrKCZDRoTgK74cQ6Cvfg==} + '@react-types/switch@3.5.7': + resolution: {integrity: sha512-1IKiq510rPTHumEZuhxuazuXBa2Cuxz6wBIlwf3NCVmgWEvU+uk1ETG0sH2yymjwCqhtJDKXi+qi9HSgPEDwAg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/table@3.10.2': - resolution: {integrity: sha512-YzA4hcsYfnFFpA2UyGb1KKhLpWgaj5daApqjp126tCIosl8k1KxZmhKD50cwH0Jm19lALJseqo5VdlcJtcr4qg==} + '@react-types/table@3.10.3': + resolution: {integrity: sha512-Ac+W+m/zgRzlTU8Z2GEg26HkuJFswF9S6w26r+R3MHwr8z2duGPvv37XRtE1yf3dbpRBgHEAO141xqS2TqGwNg==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/tabs@3.3.10': - resolution: {integrity: sha512-s/Bw/HCIdWJPBw4O703ghKqhjGsIerRMIDxA88hbQYzfTDD6bkFDjCnsP2Tyy1G8Dg2rSPFUEE+k+PpLzqeEfQ==} + '@react-types/tabs@3.3.11': + resolution: {integrity: sha512-BjF2TqBhZaIcC4lc82R5pDJd1F7kstj1K0Nokhz99AGYn8C0ITdp6lR+DPVY9JZRxKgP9R2EKfWGI90Lo7NQdA==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/textfield@3.9.7': - resolution: {integrity: sha512-vU5+QCOF9HgWGjAmmy+cpJibVW5voFomC5POmYHokm7kivYcMMjlonsgWwg/0xXrqE2qosH3tpz4jFoEuig1NQ==} + '@react-types/textfield@3.10.0': + resolution: {integrity: 
sha512-ShU3d6kLJGQjPXccVFjM3KOXdj3uyhYROqH9YgSIEVxgA9W6LRflvk/IVBamD9pJYTPbwmVzuP0wQkTDupfZ1w==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 - '@react-types/tooltip@3.4.12': - resolution: {integrity: sha512-FwsdSQ3UDIDORanQMGMLyzSUabw4AkKhwcRdPv4d5OT8GmJr7mBdZynfcsrKLJ0fzskIypMqspoutZidsI0MQg==} + '@react-types/tooltip@3.4.13': + resolution: {integrity: sha512-KPekFC17RTT8kZlk7ZYubueZnfsGTDOpLw7itzolKOXGddTXsrJGBzSB4Bb060PBVllaDO0MOrhPap8OmrIl1Q==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 '@repeaterjs/repeater@3.0.6': resolution: {integrity: sha512-Javneu5lsuhwNCryN+pXH93VPQ8g0dBX7wItHFgYiwQmzE1sVdg5tWHiOgHywzL2W21XQopa7IwIEnNbmeUJYA==} @@ -3368,88 +3389,103 @@ packages: resolution: {integrity: sha512-FqALmHI8D4o6lk/LRWDnhw95z5eO+eAa6ORjVg09YRR7BkcM6oPHU9uyC0gtQG5vpFLvgpeU4+zEAz2H8APHNw==} engines: {node: '>= 10'} - '@rollup/rollup-android-arm-eabi@4.24.0': - resolution: {integrity: sha512-Q6HJd7Y6xdB48x8ZNVDOqsbh2uByBhgK8PiQgPhwkIw/HC/YX5Ghq2mQY5sRMZWHb3VsFkWooUVOZHKr7DmDIA==} + '@rollup/rollup-android-arm-eabi@4.28.1': + resolution: {integrity: sha512-2aZp8AES04KI2dy3Ss6/MDjXbwBzj+i0GqKtWXgw2/Ma6E4jJvujryO6gJAghIRVz7Vwr9Gtl/8na3nDUKpraQ==} cpu: [arm] os: [android] - '@rollup/rollup-android-arm64@4.24.0': - resolution: {integrity: sha512-ijLnS1qFId8xhKjT81uBHuuJp2lU4x2yxa4ctFPtG+MqEE6+C5f/+X/bStmxapgmwLwiL3ih122xv8kVARNAZA==} + '@rollup/rollup-android-arm64@4.28.1': + resolution: {integrity: sha512-EbkK285O+1YMrg57xVA+Dp0tDBRB93/BZKph9XhMjezf6F4TpYjaUSuPt5J0fZXlSag0LmZAsTmdGGqPp4pQFA==} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.24.0': - resolution: {integrity: sha512-bIv+X9xeSs1XCk6DVvkO+S/z8/2AMt/2lMqdQbMrmVpgFvXlmde9mLcbQpztXm1tajC3raFDqegsH18HQPMYtA==} + '@rollup/rollup-darwin-arm64@4.28.1': + resolution: {integrity: 
sha512-prduvrMKU6NzMq6nxzQw445zXgaDBbMQvmKSJaxpaZ5R1QDM8w+eGxo6Y/jhT/cLoCvnZI42oEqf9KQNYz1fqQ==} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.24.0': - resolution: {integrity: sha512-X6/nOwoFN7RT2svEQWUsW/5C/fYMBe4fnLK9DQk4SX4mgVBiTA9h64kjUYPvGQ0F/9xwJ5U5UfTbl6BEjaQdBQ==} + '@rollup/rollup-darwin-x64@4.28.1': + resolution: {integrity: sha512-WsvbOunsUk0wccO/TV4o7IKgloJ942hVFK1CLatwv6TJspcCZb9umQkPdvB7FihmdxgaKR5JyxDjWpCOp4uZlQ==} cpu: [x64] os: [darwin] - '@rollup/rollup-linux-arm-gnueabihf@4.24.0': - resolution: {integrity: sha512-0KXvIJQMOImLCVCz9uvvdPgfyWo93aHHp8ui3FrtOP57svqrF/roSSR5pjqL2hcMp0ljeGlU4q9o/rQaAQ3AYA==} + '@rollup/rollup-freebsd-arm64@4.28.1': + resolution: {integrity: sha512-HTDPdY1caUcU4qK23FeeGxCdJF64cKkqajU0iBnTVxS8F7H/7BewvYoG+va1KPSL63kQ1PGNyiwKOfReavzvNA==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.28.1': + resolution: {integrity: sha512-m/uYasxkUevcFTeRSM9TeLyPe2QDuqtjkeoTpP9SW0XxUWfcYrGDMkO/m2tTw+4NMAF9P2fU3Mw4ahNvo7QmsQ==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.28.1': + resolution: {integrity: sha512-QAg11ZIt6mcmzpNE6JZBpKfJaKkqTm1A9+y9O+frdZJEuhQxiugM05gnCWiANHj4RmbgeVJpTdmKRmH/a+0QbA==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.24.0': - resolution: {integrity: sha512-it2BW6kKFVh8xk/BnHfakEeoLPv8STIISekpoF+nBgWM4d55CZKc7T4Dx1pEbTnYm/xEKMgy1MNtYuoA8RFIWw==} + '@rollup/rollup-linux-arm-musleabihf@4.28.1': + resolution: {integrity: sha512-dRP9PEBfolq1dmMcFqbEPSd9VlRuVWEGSmbxVEfiq2cs2jlZAl0YNxFzAQS2OrQmsLBLAATDMb3Z6MFv5vOcXg==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.24.0': - resolution: {integrity: sha512-i0xTLXjqap2eRfulFVlSnM5dEbTVque/3Pi4g2y7cxrs7+a9De42z4XxKLYJ7+OhE3IgxvfQM7vQc43bwTgPwA==} + '@rollup/rollup-linux-arm64-gnu@4.28.1': + resolution: {integrity: sha512-uGr8khxO+CKT4XU8ZUH1TTEUtlktK6Kgtv0+6bIFSeiSlnGJHG1tSFSjm41uQ9sAO/5ULx9mWOz70jYLyv1QkA==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.24.0': - 
resolution: {integrity: sha512-9E6MKUJhDuDh604Qco5yP/3qn3y7SLXYuiC0Rpr89aMScS2UAmK1wHP2b7KAa1nSjWJc/f/Lc0Wl1L47qjiyQw==} + '@rollup/rollup-linux-arm64-musl@4.28.1': + resolution: {integrity: sha512-QF54q8MYGAqMLrX2t7tNpi01nvq5RI59UBNx+3+37zoKX5KViPo/gk2QLhsuqok05sSCRluj0D00LzCwBikb0A==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-powerpc64le-gnu@4.24.0': - resolution: {integrity: sha512-2XFFPJ2XMEiF5Zi2EBf4h73oR1V/lycirxZxHZNc93SqDN/IWhYYSYj8I9381ikUFXZrz2v7r2tOVk2NBwxrWw==} + '@rollup/rollup-linux-loongarch64-gnu@4.28.1': + resolution: {integrity: sha512-vPul4uodvWvLhRco2w0GcyZcdyBfpfDRgNKU+p35AWEbJ/HPs1tOUrkSueVbBS0RQHAf/A+nNtDpvw95PeVKOA==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-powerpc64le-gnu@4.28.1': + resolution: {integrity: sha512-pTnTdBuC2+pt1Rmm2SV7JWRqzhYpEILML4PKODqLz+C7Ou2apEV52h19CR7es+u04KlqplggmN9sqZlekg3R1A==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.24.0': - resolution: {integrity: sha512-M3Dg4hlwuntUCdzU7KjYqbbd+BLq3JMAOhCKdBE3TcMGMZbKkDdJ5ivNdehOssMCIokNHFOsv7DO4rlEOfyKpg==} + '@rollup/rollup-linux-riscv64-gnu@4.28.1': + resolution: {integrity: sha512-vWXy1Nfg7TPBSuAncfInmAI/WZDd5vOklyLJDdIRKABcZWojNDY0NJwruY2AcnCLnRJKSaBgf/GiJfauu8cQZA==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.24.0': - resolution: {integrity: sha512-mjBaoo4ocxJppTorZVKWFpy1bfFj9FeCMJqzlMQGjpNPY9JwQi7OuS1axzNIk0nMX6jSgy6ZURDZ2w0QW6D56g==} + '@rollup/rollup-linux-s390x-gnu@4.28.1': + resolution: {integrity: sha512-/yqC2Y53oZjb0yz8PVuGOQQNOTwxcizudunl/tFs1aLvObTclTwZ0JhXF2XcPT/zuaymemCDSuuUPXJJyqeDOg==} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.24.0': - resolution: {integrity: sha512-ZXFk7M72R0YYFN5q13niV0B7G8/5dcQ9JDp8keJSfr3GoZeXEoMHP/HlvqROA3OMbMdfr19IjCeNAnPUG93b6A==} + '@rollup/rollup-linux-x64-gnu@4.28.1': + resolution: {integrity: sha512-fzgeABz7rrAlKYB0y2kSEiURrI0691CSL0+KXwKwhxvj92VULEDQLpBYLHpF49MSiPG4sq5CK3qHMnb9tlCjBw==} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.24.0': 
- resolution: {integrity: sha512-w1i+L7kAXZNdYl+vFvzSZy8Y1arS7vMgIy8wusXJzRrPyof5LAb02KGr1PD2EkRcl73kHulIID0M501lN+vobQ==} + '@rollup/rollup-linux-x64-musl@4.28.1': + resolution: {integrity: sha512-xQTDVzSGiMlSshpJCtudbWyRfLaNiVPXt1WgdWTwWz9n0U12cI2ZVtWe/Jgwyv/6wjL7b66uu61Vg0POWVfz4g==} cpu: [x64] os: [linux] - '@rollup/rollup-win32-arm64-msvc@4.24.0': - resolution: {integrity: sha512-VXBrnPWgBpVDCVY6XF3LEW0pOU51KbaHhccHw6AS6vBWIC60eqsH19DAeeObl+g8nKAz04QFdl/Cefta0xQtUQ==} + '@rollup/rollup-win32-arm64-msvc@4.28.1': + resolution: {integrity: sha512-wSXmDRVupJstFP7elGMgv+2HqXelQhuNf+IS4V+nUpNVi/GUiBgDmfwD0UGN3pcAnWsgKG3I52wMOBnk1VHr/A==} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.24.0': - resolution: {integrity: sha512-xrNcGDU0OxVcPTH/8n/ShH4UevZxKIO6HJFK0e15XItZP2UcaiLFd5kiX7hJnqCbSztUF8Qot+JWBC/QXRPYWQ==} + '@rollup/rollup-win32-ia32-msvc@4.28.1': + resolution: {integrity: sha512-ZkyTJ/9vkgrE/Rk9vhMXhf8l9D+eAhbAVbsGsXKy2ohmJaWg0LPQLnIxRdRp/bKyr8tXuPlXhIoGlEB5XpJnGA==} cpu: [ia32] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.24.0': - resolution: {integrity: sha512-fbMkAF7fufku0N2dE5TBXcNlg0pt0cJue4xBRE2Qc5Vqikxr4VCgKj/ht6SMdFcOacVA9rqF70APJ8RN/4vMJw==} + '@rollup/rollup-win32-x64-msvc@4.28.1': + resolution: {integrity: sha512-ZvK2jBafvttJjoIdKm/Q/Bh7IJ1Ose9IBOwpOXcOvW3ikGTQGmKDgxTC6oCAzW6PynbkKP8+um1du81XJHZ0JA==} cpu: [x64] os: [win32] - '@rrweb/types@2.0.0-alpha.17': - resolution: {integrity: sha512-AfDTVUuCyCaIG0lTSqYtrZqJX39ZEYzs4fYKnexhQ+id+kbZIpIJtaut5cto6dWZbB3SEe4fW0o90Po3LvTmfg==} + '@rrweb/types@2.0.0-alpha.18': + resolution: {integrity: sha512-iMH3amHthJZ9x3gGmBPmdfim7wLGygC2GciIkw2A6SO8giSn8PHYtRT8OKNH4V+k3SZ6RSnYHcTQxBA7pSWZ3Q==} '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} @@ -3494,20 +3530,20 @@ packages: resolution: {integrity: sha512-zaYmoH0NWWtvnJjC9/CBseXMtKHm/tm40sz3YfJRxeQjyzRqNQPgivpd9R/oDJCYj999mzdW382p/qi2ypjLww==} engines: 
{node: '>=6'} - '@shikijs/core@1.22.0': - resolution: {integrity: sha512-S8sMe4q71TJAW+qG93s5VaiihujRK6rqDFqBnxqvga/3LvqHEnxqBIOPkt//IdXVtHkQWKu4nOQNk0uBGicU7Q==} + '@shikijs/core@1.24.2': + resolution: {integrity: sha512-BpbNUSKIwbKrRRA+BQj0BEWSw+8kOPKDJevWeSE/xIqGX7K0xrCZQ9kK0nnEQyrzsUoka1l81ZtJ2mGaCA32HQ==} - '@shikijs/engine-javascript@1.22.0': - resolution: {integrity: sha512-AeEtF4Gcck2dwBqCFUKYfsCq0s+eEbCEbkUuFou53NZ0sTGnJnJ/05KHQFZxpii5HMXbocV9URYVowOP2wH5kw==} + '@shikijs/engine-javascript@1.24.2': + resolution: {integrity: sha512-EqsmYBJdLEwEiO4H+oExz34a5GhhnVp+jH9Q/XjPjmBPc6TE/x4/gD0X3i0EbkKKNqXYHHJTJUpOLRQNkEzS9Q==} - '@shikijs/engine-oniguruma@1.22.0': - resolution: {integrity: sha512-5iBVjhu/DYs1HB0BKsRRFipRrD7rqjxlWTj4F2Pf+nQSPqc3kcyqFFeZXnBMzDf0HdqaFVvhDRAGiYNvyLP+Mw==} + '@shikijs/engine-oniguruma@1.24.2': + resolution: {integrity: sha512-ZN6k//aDNWRJs1uKB12pturKHh7GejKugowOFGAuG7TxDRLod1Bd5JhpOikOiFqPmKjKEPtEA6mRCf7q3ulDyQ==} - '@shikijs/types@1.22.0': - resolution: {integrity: sha512-Fw/Nr7FGFhlQqHfxzZY8Cwtwk5E9nKDUgeLjZgt3UuhcM3yJR9xj3ZGNravZZok8XmEZMiYkSMTPlPkULB8nww==} + '@shikijs/types@1.24.2': + resolution: {integrity: sha512-bdeWZiDtajGLG9BudI0AHet0b6e7FbR0EsE4jpGaI0YwHm/XJunI9+3uZnzFtX65gsyJ6ngCIWUfA4NWRPnBkQ==} - '@shikijs/vscode-textmate@9.3.0': - resolution: {integrity: sha512-jn7/7ky30idSkd/O5yDBfAnVt+JJpepofP/POZ1iMOxK59cOfqIgg/Dj0eFsjOTMw+4ycJN0uhZH/Eb0bs/EUA==} + '@shikijs/vscode-textmate@9.3.1': + resolution: {integrity: sha512-79QfK1393x9Ho60QFyLti+QfdJzRQCVLFb97kOIV7Eo9vQU/roINgk7m24uv0a7AUvN//RDH36FLjjK48v0s9g==} '@shuding/opentype.js@1.4.0-beta.0': resolution: {integrity: sha512-3NgmNyH3l/Hv6EvsWJbsvpcpUba6R8IREQ83nH83cyakCw7uM1arZKNfHwv1Wz6jgqrF/j4x5ELvR6PnK9nTcA==} @@ -3517,14 +3553,6 @@ packages: '@sinclair/typebox@0.27.8': resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} - '@sindresorhus/slugify@2.2.1': - resolution: {integrity: 
sha512-MkngSCRZ8JdSOCHRaYd+D01XhvU3Hjy6MGl06zhOk614hp9EOAp5gIkBeQg7wtmxpitU6eAL4kdiRMcJa2dlrw==} - engines: {node: '>=12'} - - '@sindresorhus/transliterate@1.6.0': - resolution: {integrity: sha512-doH1gimEu3A46VX6aVxpHTeHrytJAG6HgdxntYnCFiIFHEM/ZGpG8KiZGBChchjQmG0XFIBL552kBTjVcMZXwQ==} - engines: {node: '>=12'} - '@storybook/channels@7.6.20': resolution: {integrity: sha512-4hkgPSH6bJclB2OvLnkZOGZW1WptJs09mhQ6j6qLjgBZzL/ZdD6priWSd7iXrmPiN5TzUobkG4P4Dp7FjkiO7A==} @@ -3537,8 +3565,8 @@ packages: '@storybook/csf-tools@7.6.20': resolution: {integrity: sha512-rwcwzCsAYh/m/WYcxBiEtLpIW5OH1ingxNdF/rK9mtGWhJxXRDV8acPkFrF8rtFWIVKoOCXu5USJYmc3f2gdYQ==} - '@storybook/csf@0.1.11': - resolution: {integrity: sha512-dHYFQH3mA+EtnCkHXzicbLgsvzYjcDJ1JWsogbItZogkPHgSJM/Wr71uMkcvw8v9mmCyP4NpXJuu6bPoVsOnzg==} + '@storybook/csf@0.1.12': + resolution: {integrity: sha512-9/exVhabisyIVL0VxTCxo01Tdm8wefIXKXfltAPTSr8cbLn5JAxGQ6QV3mjdecLGEOucfoVhAKtJfVHxEK1iqw==} '@storybook/global@5.0.0': resolution: {integrity: sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==} @@ -3591,8 +3619,8 @@ packages: '@swc/counter@0.1.3': resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} - '@swc/helpers@0.5.13': - resolution: {integrity: sha512-UoKGxQ3r5kYI9dALKJapMmuK+1zWM/H17Z1+iwnNmzcJRnfFuevZs375TA5rW31pu4BS4NoSy1fRsexDXfWn5w==} + '@swc/helpers@0.5.15': + resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} '@swc/helpers@0.5.5': resolution: {integrity: sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==} @@ -3602,20 +3630,20 @@ packages: peerDependencies: tailwindcss: '>=3.2.0' - '@tanem/react-nprogress@5.0.52': - resolution: {integrity: sha512-ncDcnDS/vm79sqPmBmeslA5TeA3+7VMmLgo6SPkjmRivHmImV/5DqzcbCltopkpadZ5/mSyCCPoQEi1bvU/VMQ==} + '@tanem/react-nprogress@5.0.53': + resolution: 
{integrity: sha512-leYKdK85/MSFz2WdcZKMI6Xaq63U06V6ERk1H+ovPPIWeVemK3HIBPb3O3Hsee9RliV5kolcM06epcuY36q0Dw==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - '@tanstack/react-virtual@3.10.8': - resolution: {integrity: sha512-VbzbVGSsZlQktyLrP5nxE+vE1ZR+U0NFAWPbJLoG2+DKPwd2D7dVICTVIIaYlJqX1ZCEnYDbaOpmMwbsyhBoIA==} + '@tanstack/react-virtual@3.11.1': + resolution: {integrity: sha512-orn2QNe5tF6SqjucHJ6cKTKcRDe3GG7bcYqPNn72Yejj7noECdzgAyRfGt2pGDPemhYim3d1HIR/dgruCnLfUA==} peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - '@tanstack/virtual-core@3.10.8': - resolution: {integrity: sha512-PBu00mtt95jbKFi6Llk9aik8bnR3tR/oQP1o3TSi+iG//+Q2RTIzCEgKkHG8BB86kxMNW6O8wku+Lmi+QFR6jA==} + '@tanstack/virtual-core@3.10.9': + resolution: {integrity: sha512-kBknKOKzmeR7lN+vSadaKWXaLS0SZZG+oqpQ/k80Q6g9REn6zRHS/ZYdrIzHnpHgy/eWs00SujveUN/GJT2qTw==} '@theguild/remark-mermaid@0.0.5': resolution: {integrity: sha512-e+ZIyJkEv9jabI4m7q29wZtZv+2iwPGsXJ2d46Zi7e+QcFudiyuqhLhHG/3gX3ZEB+hxTch+fpItyMS8jwbIcw==} @@ -3655,11 +3683,11 @@ packages: '@emotion/react': ^11.13.3 react: '>=18' - '@theme-ui/match-media@0.17.0': - resolution: {integrity: sha512-5ePc/dk7uvcq48WIPS9TRKlwrsQM0nWILnLPyil2UQsv+myyaud51oDWqtQPW9fUuHdHbfgFykiBYDa9vs9Y1A==} + '@theme-ui/match-media@0.17.1': + resolution: {integrity: sha512-rsbq1iT9IVOI4cmtrNHvEHRD4NfEkbqzQwXLyKmn25D0GFKcrx/65HELFvMzZErvAvkTtQqI+535vEoOsbu3YQ==} peerDependencies: - '@theme-ui/core': ^0.17.0 - '@theme-ui/css': ^0.17.0 + '@theme-ui/core': ^0.17.1 + '@theme-ui/css': ^0.17.1 react: '>=18' '@theme-ui/theme-provider@0.17.1': @@ -3734,8 +3762,8 @@ packages: '@types/d3-path@3.1.0': resolution: {integrity: sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ==} - '@types/d3-scale-chromatic@3.0.3': - resolution: 
{integrity: sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw==} + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} '@types/d3-scale@4.0.8': resolution: {integrity: sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==} @@ -3743,8 +3771,8 @@ packages: '@types/d3-shape@3.1.6': resolution: {integrity: sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==} - '@types/d3-time@3.0.3': - resolution: {integrity: sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw==} + '@types/d3-time@3.0.4': + resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} '@types/d3-timer@3.0.2': resolution: {integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==} @@ -3818,11 +3846,11 @@ packages: '@types/node-forge@1.3.11': resolution: {integrity: sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==} - '@types/node@18.19.58': - resolution: {integrity: sha512-2ryJttbOAWCYuZMdk4rmZZ6oqE+GSL5LxbaTVe4PCs0FUrHObZZAQL4ihMw9/cH1Pn8lSQ9TXVhsM4LrnfZ0aA==} + '@types/node@18.19.68': + resolution: {integrity: sha512-QGtpFH1vB99ZmTa63K4/FU8twThj4fuVSBkGddTp7uIL/cuoLWIUSL2RcOaigBhfR+hg5pgGkBnkoOxrTVBMKw==} - '@types/node@22.7.8': - resolution: {integrity: sha512-a922jJy31vqR5sk+kAdIENJjHblqcZ4RmERviFsER4WJcEONqxKcjNOlk0q7OUfrF5sddT+vng070cdfMlrPLg==} + '@types/node@22.10.2': + resolution: {integrity: sha512-Xxr6BBRCAOQixvonOye19wnzyDiUtTeqldOOmj3CkeblonbccA12PFwlufvRdrpjXxqnmUaeiU5EOA+7s5diUQ==} '@types/parse-json@4.0.2': resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} @@ -3830,23 +3858,25 @@ packages: 
'@types/pbkdf2@3.1.2': resolution: {integrity: sha512-uRwJqmiXmh9++aSu1VNEn3iIxWOhd8AHXNSdlaLfdAAdSTY9jYVeGWnzejM3dvrkbqE3/hyQkQQ29IFATEGlew==} - '@types/prismjs@1.26.4': - resolution: {integrity: sha512-rlAnzkW2sZOjbqZ743IHUhFcvzaGbqijwOu8QZnZCjfQzBqFE3s4lOTJEsxikImav9uzz/42I+O7YUs1mWgMlg==} + '@types/prismjs@1.26.5': + resolution: {integrity: sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ==} - '@types/prop-types@15.7.13': - resolution: {integrity: sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==} + '@types/prop-types@15.7.14': + resolution: {integrity: sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ==} - '@types/qs@6.9.16': - resolution: {integrity: sha512-7i+zxXdPD0T4cKDuxCUXJ4wHcsJLwENa6Z3dCu8cfCK743OGy5Nu1RmAGqDPsoTDINVEcdXKRvR/zre+P2Ku1A==} + '@types/qs@6.9.17': + resolution: {integrity: sha512-rX4/bPcfmvxHDv0XjfJELTTr+iB+tn032nPILqHm5wbthUUUuVtNGGqzhya9XUxjTP8Fpr0qYgSZZKxGY++svQ==} '@types/range-parser@1.2.7': resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} - '@types/react-dom@18.3.1': - resolution: {integrity: sha512-qW1Mfv8taImTthu4KoXgDfLuk4bydU6Q/TkADnDWWHwi4NX4BR+LWfTp2sVmTqRrsHvyDDTelgelxJ+SsejKKQ==} + '@types/react-dom@18.3.5': + resolution: {integrity: sha512-P4t6saawp+b/dFrUr2cvkVsfvPguwsxtH6dNIYRllMsefqFzkZk5UIjzyDOv5g1dXIPdG4Sp1yCR4Z6RCUsG/Q==} + peerDependencies: + '@types/react': ^18.0.0 - '@types/react@18.3.12': - resolution: {integrity: sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw==} + '@types/react@18.3.16': + resolution: {integrity: sha512-oh8AMIC4Y2ciKufU8hnKgs+ufgbA/dhPTACaZPM86AbwX9QwnFtSoPWEeRUj8fge+v6kFt78BXcDhAU1SrrAsw==} '@types/readable-stream@2.3.15': resolution: {integrity: sha512-oM5JSKQCcICF1wvGgmecmHldZ48OZamtMxcGGVICOJA8o8cahXC1zEVAif8iwoc5j8etxFaRFnf095+CDsuoFQ==} @@ 
-3863,8 +3893,8 @@ packages: '@types/serve-static@1.15.7': resolution: {integrity: sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==} - '@types/styled-system@5.1.22': - resolution: {integrity: sha512-NbRp37zWcrf/+Qf2NumdyZfhSx1dzJ50zgfKvnezYJx1HTRUMVYY8jtWvK1eoIAa6F5sXwHLhE8oXNu15ThBAA==} + '@types/styled-system@5.1.23': + resolution: {integrity: sha512-mIwCCdhDa2ifdQCEm8ZeD8m4UEbFsokqEoT9YNOUv4alUJ8jbMKxvpr+oOwfuZgwqLh5HjWuEzwnX7DzWvjFBg==} '@types/supports-color@8.1.3': resolution: {integrity: sha512-Hy6UMpxhE3j1tLpl27exp1XqHD7n8chAiNPzWfz16LPZoMMoSc4dzLl6w9qijkEb/r5O1ozdu1CWGA2L83ZeZg==} @@ -3878,8 +3908,8 @@ packages: '@types/validator@13.12.2': resolution: {integrity: sha512-6SlHBzUW8Jhf3liqrGGXyTJSIFe4nqlJ5A5KaMZ2l/vbM3Wh3KSybots/wfWVzNLK4D1NZluDlSQIbIEPx6oyA==} - '@types/ws@8.5.12': - resolution: {integrity: sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ==} + '@types/ws@8.5.13': + resolution: {integrity: sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA==} '@typescript-eslint/eslint-plugin@6.21.0': resolution: {integrity: sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==} @@ -3939,8 +3969,8 @@ packages: resolution: {integrity: sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==} engines: {node: ^16.0.0 || >=18.0.0} - '@ungap/structured-clone@1.2.0': - resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} + '@ungap/structured-clone@1.2.1': + resolution: {integrity: sha512-fEzPV3hSkSMltkw152tJKNARhOupqbH96MZWyRjNaYZOMIzbrTeQDG+MTc6Mr2pgzFQzFxAfmhGDNP5QK++2ZA==} '@uniswap/lib@4.0.1-alpha': resolution: {integrity: sha512-f6UIliwBbRsgVLxIaBANF6w09tYqc6Y/qXdsrbEmXHyFA7ILiKrIwRFXe1yOg8M3cksgVsO9N7yuL2DdCGQKBA==} @@ -3950,6 +3980,10 @@ packages: resolution: {integrity: 
sha512-OME7WR6+5QwQs45A2079r+/FS0zU944+JCQwUX9GyIriCxqw2pGu4F9IEqmlwD+zSIMml0+MJnJJ47pFgSyWDw==} engines: {node: '>=10'} + '@uniswap/sdk-core@6.1.0': + resolution: {integrity: sha512-pJVv8rJZwemcp9xINFG7hjxM4H+1FNiDqjpxBabwpCBsBFKfJPwe65Wa8pk8p1yT3QOgA0yFEQuavNsmTdtJ7w==} + engines: {node: '>=10'} + '@uniswap/swap-router-contracts@1.3.1': resolution: {integrity: sha512-mh/YNbwKb7Mut96VuEtL+Z5bRe0xVIbjjiryn+iMMrK2sFKhR4duk/86mEz0UO5gSx4pQIw9G5276P5heY/7Rg==} engines: {node: '>=10'} @@ -3970,8 +4004,8 @@ packages: resolution: {integrity: sha512-S4+m+wh8HbWSO3DKk4LwUCPZJTpCugIsHrWR86m/OrUyvSqGDTXKFfc2sMuGXCZrD1ZqO3rhQsKgdWg3Hbb2Kw==} engines: {node: '>=10'} - '@uniswap/v3-sdk@3.18.1': - resolution: {integrity: sha512-TGrKLToSWwfx6VV2d7fh4kwQMlgspXTLE49ep5zfYODVVqV6WhrRdbteHb3e0bjdjxGSj0gzoLmhsjmoJTE1/g==} + '@uniswap/v3-sdk@3.19.0': + resolution: {integrity: sha512-HbX3YjHJRXI2LFCxLUWgPfRZX6N9a+cELJ3Dus5vYDPYYjFOwJr16c2esDsdHUe3TG2oOeA/u2wv9TDT2GSBIw==} engines: {node: '>=10'} '@uniswap/v3-staker@1.0.0': @@ -4004,13 +4038,13 @@ packages: '@vitest/utils@1.6.0': resolution: {integrity: sha512-21cPiuGMoMZwiOHa2i4LXkMkMkCGzA+MVFV70jRwHo95dL4x/ts5GZhML1QWuy7yfp3WzK3lRvZi3JnXTYqrBw==} - '@web3icons/common@0.4.1': - resolution: {integrity: sha512-WuWMA5KUeSdFdeFefokfPk9jGCfJ2IGIx81NQ8bwStpHdCDXsNxWiqg3YO34MM7epDSf/ROGvuvOVBjLo2YBWg==} + '@web3icons/common@0.7.2': + resolution: {integrity: sha512-zgk15pcLKNM+3ve35vOnFvOLOfEb+7+WhgjyNQQfJSM9AsuLAQIgcDV1r5GCJqzjRrFIinS1eI3z8xuYVWHaiQ==} peerDependencies: typescript: ^5.0.0 - '@web3icons/react@3.10.1': - resolution: {integrity: sha512-8IoVo3vk8CBFPEQILGBWv38zwAExh5xXMznHSU0Db7Cf0vAHPc8q04hjcT5+M0a7VGnwSOH9f4+883bONRvogg==} + '@web3icons/react@3.13.2': + resolution: {integrity: sha512-d7CKIBCOglWpCIPMee3nSxK6Af5Xc274F+N78VBt2cKqVy++koifr9uF97qKQt0v0z3XSFP6NLFHWCZLyHpivg==} peerDependencies: react: ^18.2.0 @@ -4022,37 +4056,25 @@ packages: resolution: {integrity: 
sha512-ApcWxkrs1WmEMS2CaLLFUEem/49erT3sxIVjpzU5f6zmVcnijtDSrhoK2zVobOIikZJdH63jdAXOrvjf6eOUNQ==} engines: {node: '>=18.0.0'} - '@whatwg-node/fetch@0.9.22': - resolution: {integrity: sha512-+RIBffgoaRlWV9cKV6wAX71sbeoU2APOI3G13ZRMkabYHwkvDMeZDTyxJcsMXA5CpieJ7NFXF9Xyu72jwvdzqA==} + '@whatwg-node/fetch@0.10.1': + resolution: {integrity: sha512-gmPOLrsjSZWEZlr9Oe5+wWFBq3CG6fN13rGlM91Jsj/vZ95G9CCvrORGBAxMXy0AJGiC83aYiHXn3JzTzXQmbA==} engines: {node: '>=18.0.0'} - '@whatwg-node/node-fetch@0.5.27': - resolution: {integrity: sha512-0OaMj5W4fzWimRSFq07qFiWfquaUMNB+695GwE76LYKVuah+jwCdzSgsIOtwPkiyJ35w0XGhXmJPiIJCdLwopg==} + '@whatwg-node/fetch@0.9.23': + resolution: {integrity: sha512-7xlqWel9JsmxahJnYVUj/LLxWcnA93DR4c9xlw3U814jWTiYalryiH1qToik1hOxweKKRLi4haXHM5ycRksPBA==} engines: {node: '>=18.0.0'} - '@whatwg-node/server@0.9.50': - resolution: {integrity: sha512-7Vd8k6iu+ps8bkZT+Y/wPm42EDh8KojAL+APKa79mntgkyPtdq0r1//CO+0eYqQBz6HGrDxHRT4KChSOy4jGIw==} + '@whatwg-node/node-fetch@0.6.0': + resolution: {integrity: sha512-tcZAhrpx6oVlkEsRngeTEEE7I5/QdLjeEz4IlekabGaESP7+Dkm/6a9KcF1KdCBB7mO9PXtBkwCuTCt8+UPg8Q==} engines: {node: '>=18.0.0'} - '@wry/caches@1.0.1': - resolution: {integrity: sha512-bXuaUNLVVkD20wcGBWRyo7j9N3TxePEWFZj2Y+r9OoUzfqmavM84+mFykRicNsBqatba5JLay1t48wxaXaWnlA==} - engines: {node: '>=8'} - - '@wry/context@0.7.4': - resolution: {integrity: sha512-jmT7Sb4ZQWI5iyu3lobQxICu2nC/vbUhP0vIdd6tHC9PTfenmRmuIFqktc6GH9cgi+ZHnsLWPvfSvc4DrYmKiQ==} - engines: {node: '>=8'} - - '@wry/equality@0.5.7': - resolution: {integrity: sha512-BRFORjsTuQv5gxcXsuDXx6oGRhuVsEGwZy6LOzRRfgu+eSfxbhUQ9L9YtSEIuIjY/o7g3iWFjrc5eSY1GXP2Dw==} - engines: {node: '>=8'} - - '@wry/trie@0.4.3': - resolution: {integrity: sha512-I6bHwH0fSf6RqQcnnXLJKhkSXG45MFral3GxPaY4uAl0LYDZM+YDVDAiU9bYwjTuysy1S0IeecWtmq1SZA3M1w==} - engines: {node: '>=8'} + '@whatwg-node/node-fetch@0.7.5': + resolution: {integrity: sha512-t7kGrt2fdfNvzy1LCAE9/OnIyMtizgFhgJmk7iLJwQsLmR7S86F8Q4aDRPbCfo7pISJP6Fx/tPdfFNjHS23WTA==} + 
engines: {node: '>=18.0.0'} - '@wry/trie@0.5.0': - resolution: {integrity: sha512-FNoYzHawTMk/6KMQoEG5O4PuioX19UbwdQKF44yw0nLfOypfQdjtfZzo/UIJWAJ23sNIFbD1Ug9lbaDGMwbqQA==} - engines: {node: '>=8'} + '@whatwg-node/server@0.9.63': + resolution: {integrity: sha512-rHBN2murCcuuhQru/AQjA13lA9SzQAH9k8ENy4iZrAmY+C0yFYPud3HiFgPUgzR1B2KYUpIYKwC1UAUlkzASOQ==} + engines: {node: '>=18.0.0'} '@xobotyi/scrollbar-width@1.9.5': resolution: {integrity: sha512-N8tkAACJx2ww8vFMneJmaAgmjAG1tnVBZJRLRcx061tmsLRZHSEZSLuGWnwPtunsSLvSqXQ2wfp7Mgqg1I+2dQ==} @@ -4100,8 +4122,8 @@ packages: resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} engines: {node: '>=0.4.0'} - acorn@8.13.0: - resolution: {integrity: sha512-8zSiw54Oxrdym50NlZ9sUusyO1Z1ZchgRLWRaK6c86XJFClyCgFKetdowBg5bKxyp/u+CDBJG4Mpp0m3HLZl9w==} + acorn@8.14.0: + resolution: {integrity: sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==} engines: {node: '>=0.4.0'} hasBin: true @@ -4116,8 +4138,8 @@ packages: resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} engines: {node: '>= 6.0.0'} - agent-base@7.1.1: - resolution: {integrity: sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==} + agent-base@7.1.3: + resolution: {integrity: sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==} engines: {node: '>= 14'} aggregate-error@3.1.0: @@ -4138,8 +4160,8 @@ packages: ajv@8.17.1: resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} - algoliasearch@5.14.2: - resolution: {integrity: sha512-aYjI4WLamMxbhdJ2QAA99VbDCJOGzMOdT2agh57bi40n86ufkhZSIAf6mkocr7NmtBLtwCnSHvD5NJ+Ky5elWw==} + algoliasearch@5.17.1: + resolution: {integrity: 
sha512-3CcbT5yTWJDIcBe9ZHgsPi184SkT1kyZi3GWlQU5EFgvq1V73X2sqHRkPCQMe0RA/uvZbB+1sFeAk73eWygeLg==} engines: {node: '>= 14.0.0'} ansi-align@3.0.1: @@ -4299,8 +4321,8 @@ packages: resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==} engines: {node: '>=8.0.0'} - attr-accept@2.2.4: - resolution: {integrity: sha512-2pA6xFIbdTUDCAwjN8nQwI+842VwzbDUXO2IYlpPXQIORgKnavorcr4Ce3rwh+zsNg9zK7QPsdvDj3Lum4WX4w==} + attr-accept@2.2.5: + resolution: {integrity: sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==} engines: {node: '>=4'} auto-bind@4.0.0: @@ -4318,13 +4340,10 @@ packages: resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} engines: {node: '>= 0.4'} - axe-core@4.10.1: - resolution: {integrity: sha512-qPC9o+kD8Tir0lzNGLeghbOrWMr3ZJpaRlCIb6Uobt/7N4FiEDvqUMnxzCHRHmg8vOg14kr5gVNyScRmbMaJ9g==} + axe-core@4.10.2: + resolution: {integrity: sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w==} engines: {node: '>=4'} - axios@1.7.7: - resolution: {integrity: sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==} - axobject-query@4.1.0: resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} engines: {node: '>= 0.4'} @@ -4397,8 +4416,8 @@ packages: blakejs@1.2.1: resolution: {integrity: sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ==} - bn.js@4.12.0: - resolution: {integrity: sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==} + bn.js@4.12.1: + resolution: {integrity: sha512-k8TVBiPkPJT9uHLdOKfFpqcfprwBFOAAXXozRubr7R7PfIuKvQlzcI4M0pALeqXN09vdaMbUdUj+pass+uULAg==} bn.js@5.2.1: resolution: {integrity: 
sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} @@ -4437,8 +4456,8 @@ packages: browserify-aes@1.2.0: resolution: {integrity: sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==} - browserslist@4.24.2: - resolution: {integrity: sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==} + browserslist@4.24.3: + resolution: {integrity: sha512-1CPmv8iobE2fyRMV97dAcMVegvvWKxmq94hkLiAkUGwKVTyDLw33K+ZxiFrREKmmps4rIw6grcCFCnTMSZ/YiA==} engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true @@ -4492,8 +4511,16 @@ packages: resolution: {integrity: sha512-B+L5iIa9mgcjLbliir2th36yEwPftrzteHYujzsx3dFP/31GCHcIeS8f5MGd80odLOjaOvSpU3EEAmRQptkxLQ==} engines: {node: ^16.14.0 || >=18.0.0} - call-bind@1.0.7: - resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} + call-bind-apply-helpers@1.0.1: + resolution: {integrity: sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g==} + engines: {node: '>= 0.4'} + + call-bind@1.0.8: + resolution: {integrity: sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==} + engines: {node: '>= 0.4'} + + call-bound@1.0.2: + resolution: {integrity: sha512-0lk0PHFe/uz0vl527fG9CgdE9WdafjDbCXvBbs+LUv000TVt2Jjhqbs4Jwm8gz070w8xXyEAxrPOMullsxXeGg==} engines: {node: '>= 0.4'} callsites@3.1.0: @@ -4518,8 +4545,8 @@ packages: camelize@1.0.1: resolution: {integrity: sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ==} - caniuse-lite@1.0.30001669: - resolution: {integrity: sha512-DlWzFDJqstqtIVx1zeSpIMLjunf5SmwOw0N2Ck/QSQdS8PLS4+9HrLaYei4w8BIAL7IB/UEDu889d8vhCTPA0w==} + caniuse-lite@1.0.30001688: + resolution: {integrity: sha512-Nmqpru91cuABu/DTCXbM2NSRHzM2uVHfPnhJ/1zEAJx/ILBRVmz3pzH4N7DZqbdG0gWClsCC05Oj0mJ/1AWMbA==} 
capital-case@1.0.4: resolution: {integrity: sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A==} @@ -4608,12 +4635,13 @@ packages: ci-info@2.0.0: resolution: {integrity: sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==} - ci-info@4.0.0: - resolution: {integrity: sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==} + ci-info@4.1.0: + resolution: {integrity: sha512-HutrvTNsF48wnxkzERIXOe5/mlcfFcbfCmwcg6CJnizbSue78AbDt+1cgl26zwn61WFxhcPykPfZrbqjGmBb4A==} engines: {node: '>=8'} - cipher-base@1.0.4: - resolution: {integrity: sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==} + cipher-base@1.0.6: + resolution: {integrity: sha512-3Ek9H3X6pj5TgenXYtNWdaBon1tgYCaebd+XPg0keyjEbEfkD4KkmAxkQ/i1vYvxdcT5nscLBfq9VJRmCBcFSw==} + engines: {node: '>= 0.10'} classic-level@1.4.1: resolution: {integrity: sha512-qGx/KJl3bvtOHrGau2WklEZuXhS3zme+jf+fsu6Ej7W7IP/C49v7KNlWIsT1jZu0YnfzSIYDGcEWpCa1wKGWXQ==} @@ -4832,8 +4860,8 @@ packages: cross-spawn@5.1.0: resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} - cross-spawn@7.0.3: - resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} crypt@0.0.2: @@ -4876,8 +4904,8 @@ packages: peerDependencies: cytoscape: ^3.2.0 - cytoscape@3.30.2: - resolution: {integrity: sha512-oICxQsjW8uSaRmn4UK/jkczKOqTrVqt5/1WL0POiJUT2EKNc9STM4hYFHv917yu55aTBMFNRzymlJhVAiWPCxw==} + cytoscape@3.30.4: + resolution: {integrity: sha512-OxtlZwQl1WbwMmLiyPSEBuzeTIQnwZhJYYWFzZ2PhEHVFwpeaqNIkUzSiso00D98qk60l8Gwon2RP304d3BJ1A==} engines: {node: '>=0.10'} d3-array@2.12.1: @@ -5044,8 +5072,8 @@ packages: 
resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} engines: {node: '>= 0.4'} - dataloader@2.2.2: - resolution: {integrity: sha512-8YnDaaf7N3k/q5HnTJVuzSyLETjoZjVmHc4AeKAzOvKHEFQKcn64OKBfzHYtE9zGjctNM7V9I0MfnUVLpi7M5g==} + dataloader@2.2.3: + resolution: {integrity: sha512-y2krtASINtPFS1rSDjacrFgn1dcUuoREVabwlOGOe4SdxenREqwjwjElAdwvbGM7kgZz9a3KVicWR7vcz8rnzA==} date-fns@4.1.0: resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==} @@ -5069,8 +5097,8 @@ packages: supports-color: optional: true - debug@4.3.7: - resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -5179,8 +5207,8 @@ packages: resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} engines: {node: '>=8'} - disposablestack@1.1.6: - resolution: {integrity: sha512-bah1d3fXuUrtjWEkS627JTKLejcozm69OVcHgjKBbnwmN2WkHlqVeH3caFmkaEZ6+lIEsIXmz9NLPK+coJFO5w==} + disposablestack@1.1.7: + resolution: {integrity: sha512-UmyM57A8fTz5Hn4pYO/q2YdQ7fApPmxT3T5eA3Igr4UnUZ/HY6zEWSUVR7QT6kiM4udOyljC8Ag2jn7DnaSUqA==} engines: {node: '>= 0.4'} dlv@1.1.3: @@ -5207,8 +5235,8 @@ packages: resolution: {integrity: sha512-vwEppIphpFdvaMCaHfCEv9IgwcxMljMw2TnAQBB4VWPvzXQLTb82jwmdOKzlEVUL3gNFT4l4TPKO+Bn+sqcrVQ==} engines: {node: '>=12'} - dotenv@16.4.5: - resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==} + dotenv@16.4.7: + resolution: {integrity: sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==} engines: {node: '>=12'} dottie@2.0.6: @@ -5218,6 +5246,10 @@ 
packages: resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} engines: {node: '>=4'} + dunder-proto@1.0.0: + resolution: {integrity: sha512-9+Sj30DIu+4KvHqMfLUGLFYL2PkURSYMVXJyXe92nFRvlYq5hBjLEhblKB+vkd/WVlUYMWigiY07T91Fkk0+4A==} + engines: {node: '>= 0.4'} + duplexify@4.1.3: resolution: {integrity: sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA==} @@ -5227,8 +5259,8 @@ packages: ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} - electron-to-chromium@1.5.42: - resolution: {integrity: sha512-gIfKavKDw1mhvic9nbzA5lZw8QSHpdMwLwXc0cWidQz9B15pDoDdDH4boIatuFfeoCatb3a/NGL6CYRVFxGZ9g==} + electron-to-chromium@1.5.73: + resolution: {integrity: sha512-8wGNxG9tAG5KhGd3eeA0o6ixhiNdgr0DcHWm85XPCphwZgD1lIEoi6t3VERayWao7SF7AAZTw6oARGJeVjH8Kg==} elkjs@0.9.3: resolution: {integrity: sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==} @@ -5236,8 +5268,11 @@ packages: elliptic@6.5.4: resolution: {integrity: sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==} - elliptic@6.5.7: - resolution: {integrity: sha512-ESVCtTwiA+XhY3wyh24QqRGBoP3rEdDUl3EDUUo9tft074fi19IrdpH7hLCMMP3CIj7jb3W96rn8lt/BqIlt5Q==} + elliptic@6.6.1: + resolution: {integrity: sha512-RaddvvMatK2LJHqFJ+YA4WysVN5Ita9E35botqIYspQ4TkRAlCicdzKOjlyv/1Za5RyTNn7di//eEV0uTAfe3g==} + + emoji-regex-xs@1.0.0: + resolution: {integrity: sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==} emoji-regex@10.4.0: resolution: {integrity: sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==} @@ -5287,20 +5322,20 @@ packages: error-stack-parser@2.1.4: resolution: {integrity: 
sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==} - es-abstract@1.23.3: - resolution: {integrity: sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==} + es-abstract@1.23.5: + resolution: {integrity: sha512-vlmniQ0WNPwXqA0BnmwV3Ng7HxiGlh6r5U6JcTMNx8OilcAGqVJBHJcPjqOMaczU9fRuRK5Px2BdVyPRnKMMVQ==} engines: {node: '>= 0.4'} - es-define-property@1.0.0: - resolution: {integrity: sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==} + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} engines: {node: '>= 0.4'} es-errors@1.3.0: resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} - es-iterator-helpers@1.1.0: - resolution: {integrity: sha512-/SurEfycdyssORP/E+bj4sEu1CWw4EmLDsHynHwSXQ7utgbrMRWW195pTrCjFgFCddf/UkYm3oqKPRq5i8bJbw==} + es-iterator-helpers@1.2.0: + resolution: {integrity: sha512-tpxqxncxnpw3c93u8n3VOzACmRFoVmWJqbWXvX/JfKbkhBw1oslgPrUfeSt2psuqyEJFD6N/9lg5i7bsKpoq+Q==} engines: {node: '>= 0.4'} es-object-atoms@1.0.0: @@ -5314,8 +5349,8 @@ packages: es-shim-unscopables@1.0.2: resolution: {integrity: sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==} - es-to-primitive@1.2.1: - resolution: {integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==} + es-to-primitive@1.3.0: + resolution: {integrity: sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==} engines: {node: '>= 0.4'} esbuild@0.17.19: @@ -5360,8 +5395,8 @@ packages: eslint-import-resolver-node@0.3.9: resolution: {integrity: sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==} - 
eslint-import-resolver-typescript@3.6.3: - resolution: {integrity: sha512-ud9aw4szY9cCT1EWWdGv1L1XR6hh2PaRWif0j2QjQ0pgTY/69iw+W0Z4qZv5wHahOl8isEr+k/JnyAqNQkLkIA==} + eslint-import-resolver-typescript@3.7.0: + resolution: {integrity: sha512-Vrwyi8HHxY97K5ebydMtffsWAn1SCR9eol49eCd5fJS4O1WV7PaAjbcjmbfJJSMz/t4Mal212Uz/fQZrOB8mow==} engines: {node: ^14.18.0 || >=16.0.0} peerDependencies: eslint: '*' @@ -5410,8 +5445,8 @@ packages: '@typescript-eslint/parser': optional: true - eslint-plugin-jsx-a11y@6.10.1: - resolution: {integrity: sha512-zHByM9WTUMnfsDTafGXRiqxp6lFtNoSOWBY6FonVRn3A+BUwN1L/tdBXT40BcBJi0cZjOGTXZ0eD/rTG9fEJ0g==} + eslint-plugin-jsx-a11y@6.10.2: + resolution: {integrity: sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==} engines: {node: '>=4.0'} peerDependencies: eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9 @@ -5527,6 +5562,7 @@ packages: ethereumjs-abi@0.6.8: resolution: {integrity: sha512-Tx0r/iXI6r+lRsdvkFDlut0N08jWMnKRZ6Gkq+Nmw75lZe4e6o3EkSnkaBP5NF6+m5PTGAr9JP43N3LyeoglsA==} + deprecated: This library has been deprecated and usage is discouraged. 
ethereumjs-util@6.2.1: resolution: {integrity: sha512-W2Ktez4L01Vexijrm5EB6w7dg4n/TgpoYU4avuT5T3Vmnw/eCRtiBrJfQYS/DCSvDIOLn2k57GcHdeBcgVxAqw==} @@ -5720,8 +5756,8 @@ packages: resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} hasBin: true - flatted@3.3.1: - resolution: {integrity: sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==} + flatted@3.3.2: + resolution: {integrity: sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==} follow-redirects@1.15.9: resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} @@ -5760,12 +5796,12 @@ packages: fraction.js@4.3.7: resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} - framer-motion@11.11.9: - resolution: {integrity: sha512-XpdZseuCrZehdHGuW22zZt3SF5g6AHJHJi7JwQIigOznW4Jg1n0oGPMJQheMaKLC+0rp5gxUKMRYI6ytd3q4RQ==} + framer-motion@11.14.4: + resolution: {integrity: sha512-NQuzr9JbeJDMQmy0FFLhLzk9h1kAjVC1tGE/HY4ubF02B95EBm2lpA21LE3Od/OpXqXgp0zl5Hdqu25hliBRsA==} peerDependencies: '@emotion/is-prop-valid': '*' - react: ^18.0.0 - react-dom: ^18.0.0 + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 peerDependenciesMeta: '@emotion/is-prop-valid': optional: true @@ -5846,8 +5882,8 @@ packages: get-func-name@2.0.2: resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} - get-intrinsic@1.2.4: - resolution: {integrity: sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==} + get-intrinsic@1.2.6: + resolution: {integrity: sha512-qxsEs+9A+u85HhllWJJFicJfPDhRmjzoYdl64aMWW9yRIJmSyxdn8IEkuIM530/7T+lv0TIHd8L6Q/ra0tEoeA==} engines: {node: '>= 0.4'} get-nonce@1.0.1: @@ -5930,8 +5966,9 @@ packages: glur@1.1.2: resolution: {integrity: 
sha512-l+8esYHTKOx2G/Aao4lEQ0bnHWg4fWtJbVoZZT9Knxi01pB8C80BR85nONLFwkkQoFRCmXY+BUcGZN3yZ2QsRA==} - gopd@1.0.1: - resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} @@ -5952,11 +5989,6 @@ packages: peerDependencies: graphql: '>=15' - graphql-jit@0.8.6: - resolution: {integrity: sha512-oVJteh/uYDpIA/M4UHrI+DmzPnX1zTD0a7Je++JA8q8P68L/KbuepimDyrT5FhL4HAq3filUxaFvfsL6/A4msw==} - peerDependencies: - graphql: '>=15' - graphql-jit@0.8.7: resolution: {integrity: sha512-KGzCrsxQPfEiXOUIJCexWKiWF6ycjO89kAO6SdO8OWRGwYXbG0hsLuTnbFfMq0gj7d7/ib/Gh7jtst7FHZEEjw==} peerDependencies: @@ -5974,8 +6006,8 @@ packages: peerDependencies: graphql: '>=0.11 <=16' - graphql-yoga@5.7.0: - resolution: {integrity: sha512-QyGVvFAvGhMrzjJvhjsxsyoE+e4lNrj5f5qOsRYJuWIjyw7tHfbBvybZIwzNOGY0aB5sgA8BlVvu5hxjdKJ5tQ==} + graphql-yoga@5.10.5: + resolution: {integrity: sha512-W5bpXHRb6S3H3Th8poDm6b+ZMRjke8hsy9WVFgRriDTGh0AenIkpJzZT5VgXUD+j1yLCMrvhUNc6MNmgaRExsw==} engines: {node: '>=18.0.0'} peerDependencies: graphql: ^15.2.0 || ^16.0.0 @@ -6035,12 +6067,12 @@ packages: has-property-descriptors@1.0.2: resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} - has-proto@1.0.3: - resolution: {integrity: sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==} + has-proto@1.2.0: + resolution: {integrity: sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==} engines: {node: '>= 0.4'} - has-symbols@1.0.3: - resolution: {integrity: 
sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} engines: {node: '>= 0.4'} has-tostringtag@1.0.2: @@ -6065,8 +6097,8 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} - hast-util-from-dom@5.0.0: - resolution: {integrity: sha512-d6235voAp/XR3Hh5uy7aGLbM3S4KamdW0WEgOaU1YoewnuYw4HXb5eRtv9g65m/RFGEfUY1Mw4UqCc5Y8L4Stg==} + hast-util-from-dom@5.0.1: + resolution: {integrity: sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==} hast-util-from-html-isomorphic@2.0.0: resolution: {integrity: sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==} @@ -6074,8 +6106,8 @@ packages: hast-util-from-html@2.0.3: resolution: {integrity: sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==} - hast-util-from-parse5@8.0.1: - resolution: {integrity: sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==} + hast-util-from-parse5@8.0.2: + resolution: {integrity: sha512-SfMzfdAi/zAoZ1KkFEyyeXBn7u/ShQrfd675ZEE9M3qj+PMFX05xubzRyF76CCSJu8au9jgVxDV1+okFvgZU4A==} hast-util-is-element@3.0.0: resolution: {integrity: sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==} @@ -6083,14 +6115,14 @@ packages: hast-util-parse-selector@4.0.0: resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} - hast-util-raw@9.0.4: - resolution: {integrity: sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==} + hast-util-raw@9.1.0: + resolution: {integrity: 
sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} hast-util-to-estree@2.3.3: resolution: {integrity: sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ==} - hast-util-to-html@9.0.3: - resolution: {integrity: sha512-M17uBDzMJ9RPCqLMO92gNNUDuBSq10a25SDBI08iCCxmorf4Yy6sYHK57n9WAbRAAaU+DuR4W6GN9K4DFZesYg==} + hast-util-to-html@9.0.4: + resolution: {integrity: sha512-wxQzXtdbhiwGAUKrnQJXlOPmHnEehzphwkK7aluUPQ+lEc1xefC8pblMgpp2w5ldBTEfveRIrADcrhGIWrlTDA==} hast-util-to-parse5@8.0.0: resolution: {integrity: sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==} @@ -6104,8 +6136,8 @@ packages: hast-util-whitespace@3.0.0: resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==} - hastscript@8.0.0: - resolution: {integrity: sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==} + hastscript@9.0.0: + resolution: {integrity: sha512-jzaLBGavEDKHrc5EfFImKN7nZKKBdSLIdGvCwDZ9TfzbF2ffXiov8CKE445L2Z1Ek2t/m4SKQ2j6Ipv7NyUolw==} he@1.2.0: resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} @@ -6150,8 +6182,8 @@ packages: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} - https-proxy-agent@7.0.5: - resolution: {integrity: sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} engines: {node: '>= 14'} human-signals@5.0.0: @@ -6230,8 +6262,8 @@ packages: resolution: {integrity: sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==} engines: {node: '>=12.0.0'} - 
internal-slot@1.0.7: - resolution: {integrity: sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==} + internal-slot@1.1.0: + resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} engines: {node: '>= 0.4'} internmap@1.0.1: @@ -6241,8 +6273,8 @@ packages: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} - intl-messageformat@10.7.1: - resolution: {integrity: sha512-xQuJW2WcyzNJZWUu5xTVPOmNSA1Sowuu/NKFdUid5Fxx/Yl6/s4DefTU/y7zy+irZLDmFGmTLtnM8FqpN05wlA==} + intl-messageformat@10.7.10: + resolution: {integrity: sha512-hp7iejCBiJdW3zmOe18FdlJu8U/JsADSDiBPQhfdSeI8B9POtvPRvPh3nMlvhYayGMKLv6maldhR7y3Pf1vkpw==} invariant@2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} @@ -6288,15 +6320,16 @@ packages: resolution: {integrity: sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==} engines: {node: '>= 0.4'} - is-bigint@1.0.4: - resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} + is-bigint@1.1.0: + resolution: {integrity: sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==} + engines: {node: '>= 0.4'} is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} - is-boolean-object@1.1.2: - resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} + is-boolean-object@1.2.1: + resolution: {integrity: sha512-l9qO6eFlUETHtuihLcYOaLKByJ1f+N4kthcU9YjHy3N+B3hWv0y/2Nd0mu/7lTFnRQHTrSdXF50HQ3bl5fEnng==} engines: {node: '>= 0.4'} is-buffer@1.1.6: @@ -6306,23 +6339,23 @@ packages: 
resolution: {integrity: sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} engines: {node: '>=4'} - is-bun-module@1.2.1: - resolution: {integrity: sha512-AmidtEM6D6NmUiLOvvU7+IePxjEjOzra2h0pSrsfSAcXwl/83zLLXDByafUJy9k/rKK0pvXMLdwKwGHlX2Ke6Q==} + is-bun-module@1.3.0: + resolution: {integrity: sha512-DgXeu5UWI0IsMQundYb5UAOzm6G2eVnarJ0byP6Tm55iZNKceD59LNPA2L4VvsScTtHcw0yEkVwSf7PC+QoLSA==} is-callable@1.2.7: resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} engines: {node: '>= 0.4'} - is-core-module@2.15.1: - resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} + is-core-module@2.16.0: + resolution: {integrity: sha512-urTSINYfAYgcbLb0yDQ6egFm6h3Mo1DcF9EkyXSRjjzdHbsulg01qhwWuXdOoUBuTkbQ80KDboXa0vFJ+BDH+g==} engines: {node: '>= 0.4'} - is-data-view@1.0.1: - resolution: {integrity: sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==} + is-data-view@1.0.2: + resolution: {integrity: sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==} engines: {node: '>= 0.4'} - is-date-object@1.0.5: - resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} + is-date-object@1.1.0: + resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==} engines: {node: '>= 0.4'} is-decimal@1.0.4: @@ -6347,8 +6380,9 @@ packages: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} - is-finalizationregistry@1.0.2: - resolution: {integrity: sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==} + is-finalizationregistry@1.1.0: + resolution: {integrity: 
sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA==} + engines: {node: '>= 0.4'} is-fullwidth-code-point@3.0.0: resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} @@ -6390,8 +6424,8 @@ packages: resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} engines: {node: '>= 0.4'} - is-number-object@1.0.7: - resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} + is-number-object@1.1.0: + resolution: {integrity: sha512-KVSZV0Dunv9DTPkhXwcZ3Q+tUc9TsaE1ZwX5J2WMvsSGS6Md8TFPun5uwh0yRdrNerI6vf/tbJxqSx4c1ZI1Lw==} engines: {node: '>= 0.4'} is-number@7.0.0: @@ -6424,11 +6458,11 @@ packages: is-property@1.0.2: resolution: {integrity: sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==} - is-reference@3.0.2: - resolution: {integrity: sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==} + is-reference@3.0.3: + resolution: {integrity: sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==} - is-regex@1.1.4: - resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} + is-regex@1.2.1: + resolution: {integrity: sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==} engines: {node: '>= 0.4'} is-relative@1.0.0: @@ -6451,12 +6485,12 @@ packages: resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - is-string@1.0.7: - resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} + is-string@1.1.0: + resolution: {integrity: 
sha512-PlfzajuF9vSo5wErv3MJAKD/nqf9ngAs1NFQYm16nUYFO2IzxJ2hcm+IOCg+EEopdykNNUhVq5cz35cAUxU8+g==} engines: {node: '>= 0.4'} - is-symbol@1.0.4: - resolution: {integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==} + is-symbol@1.1.1: + resolution: {integrity: sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==} engines: {node: '>= 0.4'} is-typed-array@1.1.13: @@ -6478,8 +6512,9 @@ packages: resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==} engines: {node: '>= 0.4'} - is-weakref@1.0.2: - resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} + is-weakref@1.1.0: + resolution: {integrity: sha512-SXM8Nwyys6nT5WP6pltOwKytLV7FqQ4UiibxVmW+EIosHcmCqkkjViTb5SNssDlkCiEYRP1/pdWUKVvZBmsR2Q==} + engines: {node: '>= 0.4'} is-weakset@2.0.3: resolution: {integrity: sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==} @@ -6508,8 +6543,8 @@ packages: peerDependencies: ws: '*' - iterator.prototype@1.1.3: - resolution: {integrity: sha512-FW5iMbeQ6rBGm/oKgzq2aW4KvAGpxPzYES8N4g4xNXUKpL1mclMvOe+76AcLDTvD+Ze+sOpVhgdAQEKF4L9iGQ==} + iterator.prototype@1.1.4: + resolution: {integrity: sha512-x4WH0BWmrMmg4oHHl+duwubhrvczGlyuGAZu3nvrf0UXOfPu8IhZObFEr7DE/iv01YgVZrsOiRcqw2srkKEDIA==} engines: {node: '>= 0.4'} itty-time@1.0.6: @@ -6550,8 +6585,8 @@ packages: js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} - js-tokens@9.0.0: - resolution: {integrity: sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ==} + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} js-yaml@3.14.1: resolution: {integrity: 
sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} @@ -6576,8 +6611,8 @@ packages: canvas: optional: true - jsesc@3.0.2: - resolution: {integrity: sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==} + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} engines: {node: '>=6'} hasBin: true @@ -6637,8 +6672,8 @@ packages: resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==} engines: {node: '>=4.0'} - katex@0.16.11: - resolution: {integrity: sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==} + katex@0.16.15: + resolution: {integrity: sha512-yE9YJIEAk2aZ+FL/G8r+UGw0CTUzEA8ZFy6E+8tc3spHUKq3qBnzCkI1CQwGoI9atJhVyFPEypQsTY7mJ1Pi9w==} hasBin: true keccak@3.0.4: @@ -6695,12 +6730,8 @@ packages: lie@3.1.1: resolution: {integrity: sha512-RiNhHysUjhrDQntfYSfY4MU24coXXdEOgw9WGcKHNeEwffDYbF//u87M1EWaMGzuFoSbqW0C9C6lEEhDOAswfw==} - lilconfig@2.1.0: - resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} - engines: {node: '>=10'} - - lilconfig@3.1.2: - resolution: {integrity: sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==} + lilconfig@3.1.3: + resolution: {integrity: sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==} engines: {node: '>=14'} linebreak@1.1.0: @@ -6720,8 +6751,8 @@ packages: resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - local-pkg@0.5.0: - resolution: {integrity: sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==} + local-pkg@0.5.1: + resolution: {integrity: 
sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==} engines: {node: '>=14'} localforage@1.10.0: @@ -6801,8 +6832,8 @@ packages: magic-string@0.25.9: resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} - magic-string@0.30.12: - resolution: {integrity: sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw==} + magic-string@0.30.15: + resolution: {integrity: sha512-zXeaYRgZ6ldS1RJJUrMrYgNJ4fdwnyI6tVqoiIhyCyv5IVTK9BU8Ic2l253GGETQHxI4HNUwhJ3fjDhKqEoaAw==} make-dir@3.1.0: resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==} @@ -6829,6 +6860,10 @@ packages: markdown-table@3.0.4: resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + math-intrinsics@1.0.0: + resolution: {integrity: sha512-4MqMiKP90ybymYvsut0CH2g4XWbfLtmlCkXmtmdcDCxNB+mQcu1w/1+L/VD7vi/PSv7X2JYV7SCcR+jiPXnQtA==} + engines: {node: '>= 0.4'} + mcl-wasm@0.7.9: resolution: {integrity: sha512-iJIUcQWA88IJB/5L15GnJVnSQJmf/YaxxV6zRavv83HILHaJQb6y0iFyDMdDO0gN8X37tdxmAOrH/P8B6RB8sQ==} engines: {node: '>=8.9.0'} @@ -6851,8 +6886,8 @@ packages: mdast-util-from-markdown@1.3.1: resolution: {integrity: sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==} - mdast-util-from-markdown@2.0.1: - resolution: {integrity: sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==} + mdast-util-from-markdown@2.0.2: + resolution: {integrity: sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==} mdast-util-frontmatter@2.0.1: resolution: {integrity: sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==} @@ -6905,8 +6940,8 @@ packages: mdast-util-to-markdown@1.5.0: resolution: {integrity: 
sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==} - mdast-util-to-markdown@2.1.0: - resolution: {integrity: sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==} + mdast-util-to-markdown@2.1.2: + resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} mdast-util-to-string@2.0.0: resolution: {integrity: sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==} @@ -6964,8 +6999,8 @@ packages: micromark-core-commonmark@1.1.0: resolution: {integrity: sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==} - micromark-core-commonmark@2.0.1: - resolution: {integrity: sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA==} + micromark-core-commonmark@2.0.2: + resolution: {integrity: sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w==} micromark-extension-frontmatter@2.0.0: resolution: {integrity: sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==} @@ -7012,14 +7047,14 @@ packages: micromark-factory-destination@1.1.0: resolution: {integrity: sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==} - micromark-factory-destination@2.0.0: - resolution: {integrity: sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==} + micromark-factory-destination@2.0.1: + resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} micromark-factory-label@1.1.0: resolution: {integrity: sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==} - micromark-factory-label@2.0.0: - resolution: {integrity: 
sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==} + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} micromark-factory-mdx-expression@1.0.9: resolution: {integrity: sha512-jGIWzSmNfdnkJq05c7b0+Wv0Kfz3NJ3N4cBjnbO4zjXIlxJr+f8lk+5ZmwFvqdAbUy2q6B5rCY//g0QAAaXDWA==} @@ -7027,62 +7062,62 @@ packages: micromark-factory-space@1.1.0: resolution: {integrity: sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==} - micromark-factory-space@2.0.0: - resolution: {integrity: sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==} + micromark-factory-space@2.0.1: + resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} micromark-factory-title@1.1.0: resolution: {integrity: sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==} - micromark-factory-title@2.0.0: - resolution: {integrity: sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==} + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} micromark-factory-whitespace@1.1.0: resolution: {integrity: sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==} - micromark-factory-whitespace@2.0.0: - resolution: {integrity: sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==} + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} micromark-util-character@1.2.0: resolution: {integrity: 
sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==} - micromark-util-character@2.1.0: - resolution: {integrity: sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==} + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} micromark-util-chunked@1.1.0: resolution: {integrity: sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==} - micromark-util-chunked@2.0.0: - resolution: {integrity: sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==} + micromark-util-chunked@2.0.1: + resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} micromark-util-classify-character@1.1.0: resolution: {integrity: sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==} - micromark-util-classify-character@2.0.0: - resolution: {integrity: sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==} + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} micromark-util-combine-extensions@1.1.0: resolution: {integrity: sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==} - micromark-util-combine-extensions@2.0.0: - resolution: {integrity: sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==} + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} micromark-util-decode-numeric-character-reference@1.1.0: resolution: {integrity: 
sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==} - micromark-util-decode-numeric-character-reference@2.0.1: - resolution: {integrity: sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==} + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} micromark-util-decode-string@1.1.0: resolution: {integrity: sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==} - micromark-util-decode-string@2.0.0: - resolution: {integrity: sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==} + micromark-util-decode-string@2.0.1: + resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==} micromark-util-encode@1.1.0: resolution: {integrity: sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==} - micromark-util-encode@2.0.0: - resolution: {integrity: sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==} + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} micromark-util-events-to-acorn@1.2.3: resolution: {integrity: sha512-ij4X7Wuc4fED6UoLWkmo0xJQhsktfNh1J0m8g4PbIMPlx+ek/4YdW5mvbye8z/aZvAPUoxgXHrwVlXAPKMRp1w==} @@ -7090,44 +7125,44 @@ packages: micromark-util-html-tag-name@1.2.0: resolution: {integrity: sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==} - micromark-util-html-tag-name@2.0.0: - resolution: {integrity: sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==} + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: 
sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} micromark-util-normalize-identifier@1.1.0: resolution: {integrity: sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==} - micromark-util-normalize-identifier@2.0.0: - resolution: {integrity: sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==} + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} micromark-util-resolve-all@1.1.0: resolution: {integrity: sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==} - micromark-util-resolve-all@2.0.0: - resolution: {integrity: sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==} + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} micromark-util-sanitize-uri@1.2.0: resolution: {integrity: sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==} - micromark-util-sanitize-uri@2.0.0: - resolution: {integrity: sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==} + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} micromark-util-subtokenize@1.1.0: resolution: {integrity: sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==} - micromark-util-subtokenize@2.0.1: - resolution: {integrity: sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q==} + micromark-util-subtokenize@2.0.3: + resolution: {integrity: 
sha512-VXJJuNxYWSoYL6AJ6OQECCFGhIU2GGHMw8tahogePBrjkG8aCCas3ibkp7RnVOSTClg2is05/R7maAhF1XyQMg==} micromark-util-symbol@1.1.0: resolution: {integrity: sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==} - micromark-util-symbol@2.0.0: - resolution: {integrity: sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==} + micromark-util-symbol@2.0.1: + resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} micromark-util-types@1.1.0: resolution: {integrity: sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==} - micromark-util-types@2.0.0: - resolution: {integrity: sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==} + micromark-util-types@2.0.1: + resolution: {integrity: sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ==} micromark@2.11.4: resolution: {integrity: sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA==} @@ -7135,8 +7170,8 @@ packages: micromark@3.2.0: resolution: {integrity: sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==} - micromark@4.0.0: - resolution: {integrity: sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==} + micromark@4.0.1: + resolution: {integrity: sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw==} micromatch@4.0.8: resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} @@ -7168,8 +7203,8 @@ packages: resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} engines: {node: '>=12'} - miniflare@3.20241106.0: - resolution: {integrity: 
sha512-PjOoJKjUUofCueQskfhXlGvvHxZj36UAJAp1DnquMK88MFF50zCULblh0KXMSNM+bXeQYA94Gj06a7kfmBGxPw==} + miniflare@3.20241205.0: + resolution: {integrity: sha512-Z0cTtIf6ZrcAJ3SrOI9EUM3s4dkGhNeU6Ubl8sroYhsPVD+rtz3m5+p6McHFWCkcMff1o60X5XEKVTmkz0gbpA==} engines: {node: '>=16.13'} hasBin: true @@ -7249,14 +7284,14 @@ packages: engines: {node: '>=10'} hasBin: true - mlly@1.7.2: - resolution: {integrity: sha512-tN3dvVHYVz4DhSXinXIk7u9syPYaJvio118uomkovAtWBT+RdbP6Lfh/5Lvo519YMmwBafwlh20IPTXIStscpA==} + mlly@1.7.3: + resolution: {integrity: sha512-xUsx5n/mN0uQf4V548PKQ+YShA4/IW0KI1dZhrNrPCLG+xizETbHTkOa1f8/xut9JRPp8kQuMnz0oqwkTiLo/A==} mnemonist@0.38.5: resolution: {integrity: sha512-bZTFT5rrPKtPJxj8KSV0WkPyNxl72vQepqqVUAW2ARUpUSF2qXMB6jZj7hW5/k7C1rtpzqbD/IIbJwLXUjCHeg==} - mocha@10.7.3: - resolution: {integrity: sha512-uQWxAu44wwiACGqjbPYmjo7Lg8sFrS3dQe7PP2FQI+woptP4vZXSMcfMyFL/e1yFEeEpV4RtyTpZROOKmxis+A==} + mocha@10.8.2: + resolution: {integrity: sha512-VZlYo/WE8t1tstuRmqgeyBgCbJc/lEdopaa+axcKzTBJ+UIdlAB9XnmvTCAH4pwR4ElNInaedhEBmZD8iCSVEg==} engines: {node: '>= 14.0.0'} hasBin: true @@ -7274,6 +7309,26 @@ packages: resolution: {integrity: sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==} engines: {node: '>= 0.8.0'} + motion-dom@11.14.3: + resolution: {integrity: sha512-lW+D2wBy5vxLJi6aCP0xyxTxlTfiu+b+zcpVbGVFUxotwThqhdpPRSmX8xztAgtZMPMeU0WGVn/k1w4I+TbPqA==} + + motion-utils@11.14.3: + resolution: {integrity: sha512-Xg+8xnqIJTpr0L/cidfTTBFkvRw26ZtGGuIhA94J9PQ2p4mEa06Xx7QVYZH0BP+EpMSaDlu+q0I0mmvwADPsaQ==} + + motion@11.14.4: + resolution: {integrity: sha512-ZIaw6ko88B8rSmBEFzqbTCQMbo9xMu8f4PSXSGdb9DTDy8R0sXcbwMEKmTEYkrj9TmZ4n+Ebd0KYjtqHgzRkRQ==} + peerDependencies: + '@emotion/is-prop-valid': '*' + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + peerDependenciesMeta: + '@emotion/is-prop-valid': + optional: true + react: + optional: true + react-dom: + optional: true + mri@1.2.0: resolution: {integrity: 
sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} engines: {node: '>=4'} @@ -7303,8 +7358,8 @@ packages: react: '*' react-dom: '*' - nanoid@3.3.7: - resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} + nanoid@3.3.8: + resolution: {integrity: sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} hasBin: true @@ -7343,8 +7398,8 @@ packages: peerDependencies: next: '*' - next@14.2.18: - resolution: {integrity: sha512-H9qbjDuGivUDEnK6wa+p2XKO+iMzgVgyr9Zp/4Iv29lKa+DYaxJGjOeEA+5VOvJh/M7HLiskehInSa0cWxVXUw==} + next@14.2.20: + resolution: {integrity: sha512-yPvIiWsiyVYqJlSQxwmzMIReXn5HxFNq4+tlVQ812N1FbvhmE+fDpIAD7bcS2mGYQwPJ5vAsQouyme2eKsxaug==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -7395,8 +7450,8 @@ packages: resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} engines: {node: '>= 6.13.0'} - node-gyp-build@4.8.2: - resolution: {integrity: sha512-IRUxE4BVsHWXkV/SFOut4qTlagw2aM8T5/vnTsmrHJvVoKueJHRc/JaFND7QDDc61kLYUJ6qlZM3sqTSyx2dTw==} + node-gyp-build@4.8.4: + resolution: {integrity: sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ==} hasBin: true node-gyp@10.0.1: @@ -7411,8 +7466,8 @@ packages: resolution: {integrity: sha512-v+u+OgSq6ldvf8MrdjieAy/mv8WeTN94nrTomh62zhItF2HH0Ckin/QEqs8+35DWyYrE5nBM2480UtWVXktzbQ==} engines: {node: '>=16.14'} - node-releases@2.0.18: - resolution: {integrity: sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==} + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} non-layered-tidy-tree-layout@2.0.2: resolution: {integrity: 
sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==} @@ -7470,8 +7525,8 @@ packages: numeral@2.0.6: resolution: {integrity: sha512-qaKRmtYPZ5qdw4jWJD6bxEf1FJEqllJrwxCLIm0sQU/A7v2/czigzOb+C2uSiFsa9lBUzeH7M1oK+Q+OLxL3kA==} - nwsapi@2.2.13: - resolution: {integrity: sha512-cTGB9ptp9dY9A5VbMSe7fQBcl/tt22Vcqdq8+eN93rblOuE0aCFu4aZ2vMwct/2t+lFnosm8RkQW1I0Omb1UtQ==} + nwsapi@2.2.16: + resolution: {integrity: sha512-F1I/bimDpj3ncaNDhfyMWuFqmQDBwDB0Fogc2qpL3BWvkQteFD/8BzWuIRl83rq0DXfm8SGt/HFhLXZyljTXcQ==} object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} @@ -7488,6 +7543,10 @@ packages: resolution: {integrity: sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==} engines: {node: '>= 0.4'} + object-inspect@1.13.3: + resolution: {integrity: sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==} + engines: {node: '>= 0.4'} + object-keys@1.1.1: resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} engines: {node: '>= 0.4'} @@ -7544,16 +7603,13 @@ packages: resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} engines: {node: '>=12'} - oniguruma-to-js@0.4.3: - resolution: {integrity: sha512-X0jWUcAlxORhOqqBREgPMgnshB7ZGYszBNspP+tS9hPD3l13CdaXcHbgImoHUHlrvGx/7AvFEkTRhAGYh+jzjQ==} + oniguruma-to-es@0.7.0: + resolution: {integrity: sha512-HRaRh09cE0gRS3+wi2zxekB+I5L8C/gN60S+vb11eADHUaB/q4u8wGGOX3GvwvitG8ixaeycZfeoyruKQzUgNg==} open@7.4.2: resolution: {integrity: sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==} engines: {node: '>=8'} - optimism@0.18.0: - resolution: {integrity: sha512-tGn8+REwLRNFnb9WmcY5IfpOqeX2kpaYJ1s6Ae3mn12AeydLkR3j+jSCmVQFoXqU8D41PAJ1RG1rCRNWmNZVmQ==} - 
optionator@0.9.4: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} @@ -7632,8 +7688,8 @@ packages: parse-entities@2.0.0: resolution: {integrity: sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==} - parse-entities@4.0.1: - resolution: {integrity: sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==} + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} parse-filepath@1.0.2: resolution: {integrity: sha512-FwdRXKCohSVeXqwtYonZTXtbGJKrn+HNyWDYVcp5yuJlesTwNH4rsmRZ+GrKAPJ5bLpRxESMeS+Rl0VCHRvB2Q==} @@ -7650,8 +7706,8 @@ packages: parse-numeric-range@1.3.0: resolution: {integrity: sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==} - parse5@7.2.0: - resolution: {integrity: sha512-ZkDsAOcxsUMZ4Lz5fVciOehNcJ+Gb8gTzcA4yl3wnc273BAybYWrQ+Ks/OjCjSEpjvQkDSeZbybK9qj2VHHdGA==} + parse5@7.2.1: + resolution: {integrity: sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ==} parseurl@1.3.3: resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} @@ -7965,8 +8021,8 @@ packages: engines: {node: '>=10.13.0'} hasBin: true - prettier@3.3.3: - resolution: {integrity: sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==} + prettier@3.4.2: + resolution: {integrity: sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==} engines: {node: '>=14'} hasBin: true @@ -7977,8 +8033,8 @@ packages: printable-characters@1.0.42: resolution: {integrity: sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==} - prism-react-renderer@2.4.0: - resolution: 
{integrity: sha512-327BsVCD/unU4CNLZTWVHyUHKnsqcvj2qbPlQ8MiBE2eq2rgctjigPA1Gp9HLF83kZ20zNN6jgizHJeEsyFYOw==} + prism-react-renderer@2.4.1: + resolution: {integrity: sha512-ey8Ls/+Di31eqzUxC46h8MksNuGx/n0AAC8uKpwFau4RPDYLuE3EXTp8N8G2vX2N7UC/+IXeNUnlWBGGcAG+Ig==} peerDependencies: react: '>=16.0.0' @@ -8022,14 +8078,11 @@ packages: resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} engines: {node: '>= 0.10'} - proxy-from-env@1.1.0: - resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} - pseudomap@1.0.2: resolution: {integrity: sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==} - psl@1.9.0: - resolution: {integrity: sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==} + psl@1.15.0: + resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} pump@3.0.2: resolution: {integrity: sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==} @@ -8045,8 +8098,8 @@ packages: resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} engines: {node: '>=0.6'} - qs@6.13.0: - resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} + qs@6.13.1: + resolution: {integrity: sha512-EJPeIn0CYrGu+hli1xilKAPXODtJ12T0sP63Ijx2/khC2JtuaN3JyNIpvmnkmaEtha9ocbG4A4cMcr+TvqvwQg==} engines: {node: '>=0.6'} querystringify@2.2.0: @@ -8076,11 +8129,11 @@ packages: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} - react-aria@3.35.1: - resolution: {integrity: sha512-MQTvt0xbcKpnceKkYUtPMbaD9IQj2BXTrwk2vP/V7ph3EVhcyJTUdy1LXCqf8oR8bXE2BERUqp7rzJ+vYy5C+w==} + 
react-aria@3.36.0: + resolution: {integrity: sha512-AK5XyIhAN+e5HDlwlF+YwFrOrVI7RYmZ6kg/o7ZprQjkYqYKapXeUpWscmNm/3H2kDboE5Z4ymUnK6ZhobLqOw==} peerDependencies: - react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 - react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0 + react: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 + react-dom: ^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1 react-device-detect@2.2.3: resolution: {integrity: sha512-buYY3qrCnQVlIFHrC5UcUoAj7iANs/+srdkwsnNjI7anr3Tt7UY6MqNxtMLlr0tMBied0O49UZVK8XKs3ZIiPw==} @@ -8142,8 +8195,8 @@ packages: '@types/react': optional: true - react-smooth@4.0.1: - resolution: {integrity: sha512-OE4hm7XqR0jNOq3Qmk9mFLyd6p2+j6bvbPJ7qlB7+oo0eNcL2l7WQzG6MBnT3EXY6xzkLMUBec3AfewJdA0J8w==} + react-smooth@4.0.3: + resolution: {integrity: sha512-PyxIrra8WZWrMRFcCiJsZ+JqFaxEINAt+v/w++wQKQlmO99Eh3+JTLeKApdTsLX2roBdWYXqPsaS8sO4UmdzIg==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 @@ -8170,14 +8223,14 @@ packages: react: '*' tslib: '*' - react-use@17.5.1: - resolution: {integrity: sha512-LG/uPEVRflLWMwi3j/sZqR00nF6JGqTTDblkXK2nzXsIvij06hXl1V/MZIlwj1OKIQUtlh1l9jK8gLsRyCQxMg==} + react-use@17.6.0: + resolution: {integrity: sha512-OmedEScUMKFfzn1Ir8dBxiLLSOzhKe/dPZwVxcujweSj45aNM7BEGPb9BEVIgVEqEXx6f3/TsXzwIktNgUR02g==} peerDependencies: react: '*' react-dom: '*' - react-virtuoso@4.12.0: - resolution: {integrity: sha512-oHrKlU7xHsrnBQ89ecZoMPAK0tHnI9s1hsFW3KKg5ZGeZ5SWvbGhg/QFJFY4XETAzoCUeu+Xaxn1OUb/PGtPlA==} + react-virtuoso@4.12.3: + resolution: {integrity: sha512-6X1p/sU7hecmjDZMAwN+r3go9EVjofKhwkUbVlL8lXhBZecPv9XVCkZ/kBPYOr0Mv0Vl5+Ziwgexg9Kh7+NNXQ==} engines: {node: '>=10'} peerDependencies: react: '>=16 || >=17 || >= 18' @@ -8220,38 +8273,33 @@ packages: recharts-scale@0.4.5: resolution: {integrity: sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==} - recharts@2.13.0: - resolution: {integrity: 
sha512-sbfxjWQ+oLWSZEWmvbq/DFVdeRLqqA6d0CDjKx2PkxVVdoXo16jvENCE+u/x7HxOO+/fwx//nYRwb8p8X6s/lQ==} + recharts@2.15.0: + resolution: {integrity: sha512-cIvMxDfpAmqAmVgc4yb7pgm/O1tmmkl/CjrvXuW+62/+7jj/iF9Ykm+hb/UJt42TREHMyd3gb+pkgoa2MxgDIw==} engines: {node: '>=14'} peerDependencies: - react: ^16.0.0 || ^17.0.0 || ^18.0.0 - react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 + react: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - reflect.getprototypeof@1.0.6: - resolution: {integrity: sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==} + reflect.getprototypeof@1.0.8: + resolution: {integrity: sha512-B5dj6usc5dkk8uFliwjwDHM8To5/QwdKz9JcBZ8Ic4G1f0YmeeJTtE/ZTdgRFPAfxZFiUaPhZ1Jcs4qeagItGQ==} engines: {node: '>= 0.4'} regenerator-runtime@0.14.1: resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} - regex@4.3.3: - resolution: {integrity: sha512-r/AadFO7owAq1QJVeZ/nq9jNS1vyZt+6t1p/E59B56Rn2GCya+gr1KSyOzNL/er+r+B7phv5jG2xU2Nz1YkmJg==} + regex-recursion@4.3.0: + resolution: {integrity: sha512-5LcLnizwjcQ2ALfOj95MjcatxyqF5RPySx9yT+PaXu3Gox2vyAtLDjHB8NTJLtMGkvyau6nI3CfpwFCjPUIs/A==} + + regex-utilities@2.3.0: + resolution: {integrity: sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@5.0.2: + resolution: {integrity: sha512-/pczGbKIQgfTMRV0XjABvc5RzLqQmwqxLHdQao2RTXPk+pmTXB2P0IaUHYdYyk412YLwUIkaeMd5T+RzVgTqnQ==} regexp.prototype.flags@1.5.3: resolution: {integrity: sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ==} engines: {node: '>= 0.4'} - rehackt@0.1.0: - resolution: {integrity: sha512-7kRDOuLHB87D/JESKxQoRwv4DzbIdwkAGQ7p6QKGdVlY1IZheUnVhlk/4UZlNUVxdAXpyxikE3URsG067ybVzw==} - peerDependencies: - '@types/react': '*' - react: '*' - peerDependenciesMeta: - '@types/react': - optional: true - react: - optional: true - 
rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} @@ -8331,25 +8379,17 @@ packages: resolve-pkg-maps@1.0.0: resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} - resolve.exports@2.0.2: - resolution: {integrity: sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==} - engines: {node: '>=10'} - resolve@1.17.0: resolution: {integrity: sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==} - resolve@1.22.8: - resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + resolve@1.22.9: + resolution: {integrity: sha512-QxrmX1DzraFIi9PxdG5VkRfRwIgjwyud+z/iBwfRRrVmHc+P9Q7u2lSSpQ6bjr2gy5lrqIiU9vb6iAeGf2400A==} hasBin: true resolve@2.0.0-next.5: resolution: {integrity: sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==} hasBin: true - response-iterator@0.2.6: - resolution: {integrity: sha512-pVzEEzrsg23Sh053rmDUvLSkGXluZio0qu8VT6ukrYuvtjVfCbDZH9d6PGXb8HZfzdNZt8feXv/jvUzlhRgLnw==} - engines: {node: '>=0.8'} - restore-cursor@3.1.0: resolution: {integrity: sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==} engines: {node: '>=8'} @@ -8407,19 +8447,19 @@ packages: rollup-pluginutils@2.8.2: resolution: {integrity: sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==} - rollup@4.24.0: - resolution: {integrity: sha512-DOmrlGSXNk1DM0ljiQA+i+o0rSLhtii1je5wgk60j49d1jHT5YYttBv1iWOnYSTG+fZZESUOSNiAl89SIet+Cg==} + rollup@4.28.1: + resolution: {integrity: sha512-61fXYl/qNVinKmGSTHAZ6Yy8I3YIJC/r2m9feHo6SwVAVcLT5MPwOUFe7EuURA/4m0NR8lXG4BBXuo/IZEsjMg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true - rrdom@2.0.0-alpha.17: - resolution: {integrity: 
sha512-b6caDiNcFO96Opp7TGdcVd4OLGSXu5dJe+A0IDiAu8mk7OmhqZCSDlgQdTKmdO5wMf4zPsUTgb8H/aNvR3kDHA==} + rrdom@2.0.0-alpha.18: + resolution: {integrity: sha512-fSFzFFxbqAViITyYVA4Z0o5G6p1nEqEr/N8vdgSKie9Rn0FJxDSNJgjV0yiCIzcDs0QR+hpvgFhpbdZ6JIr5Nw==} rrweb-cssom@0.7.1: resolution: {integrity: sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==} - rrweb-snapshot@2.0.0-alpha.17: - resolution: {integrity: sha512-GBg5pV8LHOTbeVmH2VHLEFR0mc2QpQMzAvcoxEGfPNWgWHc8UvKCyq7pqN1vA+fDZ+yXXbixeO0kB2pzVvFCBw==} + rrweb-snapshot@2.0.0-alpha.18: + resolution: {integrity: sha512-hBHZL/NfgQX6wO1D9mpwqFu1NJPpim+moIcKhFEjVTZVRUfCln+LOugRc4teVTCISYHN8Cw5e2iNTWCSm+SkoA==} rrweb@2.0.0-alpha.13: resolution: {integrity: sha512-a8GXOCnzWHNaVZPa7hsrLZtNZ3CGjiL+YrkpLo0TfmxGLhjNZbWY2r7pE06p+FcjFNlgUVTmFrSJbK3kO7yxvw==} @@ -8450,8 +8490,8 @@ packages: resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==} engines: {node: '>=6'} - safe-array-concat@1.1.2: - resolution: {integrity: sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==} + safe-array-concat@1.1.3: + resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==} engines: {node: '>=0.4'} safe-buffer@5.1.2: @@ -8460,8 +8500,8 @@ packages: safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} - safe-regex-test@1.0.3: - resolution: {integrity: sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==} + safe-regex-test@1.1.0: + resolution: {integrity: sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==} engines: {node: '>= 0.4'} safe-stable-stringify@2.5.0: @@ -8489,8 +8529,8 @@ packages: scrypt-js@3.0.1: resolution: {integrity: 
sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA==} - search-insights@2.17.2: - resolution: {integrity: sha512-zFNpOpUO+tY2D85KrxJ+aqwnIfdEGi06UH2+xEb+Bp9Mwznmauqc9djbnBibJO5mpfUPPa8st6Sx65+vbeO45g==} + search-insights@2.17.3: + resolution: {integrity: sha512-RQPdCYTa8A68uM2jwxoY842xDhvx3E5LFL1LxvxCNMev4o5mLuokczhzjAgGwUZBAmOKZknArSxLKmXtIi2AxQ==} secp256k1@4.0.4: resolution: {integrity: sha512-6JfvwvjUOn8F/jUoBY2Q1v5WY5XS+rj8qSe0v8Y4ezH4InLgTEeOOPQsRll9OV429Pvo6BCHGavIyJfr3TAhsw==} @@ -8612,11 +8652,23 @@ packages: shiki@0.14.7: resolution: {integrity: sha512-dNPAPrxSc87ua2sKJ3H5dQ/6ZaY8RNnaAqK+t0eG7p0Soi2ydiqbGOTaZCqaYvA/uZYfS1LJnemt3Q+mSfcPCg==} - shiki@1.22.0: - resolution: {integrity: sha512-/t5LlhNs+UOKQCYBtl5ZsH/Vclz73GIqT2yQsCBygr8L/ppTdmpL4w3kPLoZJbMKVWtoG77Ue1feOjZfDxvMkw==} + shiki@1.24.2: + resolution: {integrity: sha512-TR1fi6mkRrzW+SKT5G6uKuc32Dj2EEa7Kj0k8kGqiBINb+C1TiflVOiT9ta6GqOJtC4fraxO5SLUaKBcSY38Fg==} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} + engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: {node: '>= 0.4'} - side-channel@1.0.6: - resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} engines: {node: '>= 0.4'} siginfo@2.0.0: @@ -8652,8 +8704,8 @@ packages: snake-case@3.0.4: resolution: {integrity: 
sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} - socks-proxy-agent@8.0.4: - resolution: {integrity: sha512-GNAq/eg8Udq2x0eNiFkr9gRg5bA7PXEWagQdeRX4cPSG+X/8V38v637gim9bjFptMk1QWsCTr0ttrJEiXbNnRw==} + socks-proxy-agent@8.0.5: + resolution: {integrity: sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==} engines: {node: '>= 14'} socks@2.8.3: @@ -8729,6 +8781,9 @@ packages: resolution: {integrity: sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + stable-hash@0.0.4: + resolution: {integrity: sha512-LjdcbuBeLcdETCrPn9i8AYAZ1eCtu4ECAWtP7UleOiZ9LzVxRzzUZEoZ8zB24nhkQnDWyET0I+3sWokSDS3E7g==} + stack-generator@2.0.10: resolution: {integrity: sha512-mwnua/hkqM6pF4k8SnmZ2zfETsRUpWXREfA/goT8SLCV4iOFa4bzOX2nDipWAZFPTjLvQB82f5yaodMVhK0yJQ==} @@ -8755,8 +8810,8 @@ packages: resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} engines: {node: '>= 0.8'} - std-env@3.7.0: - resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} + std-env@3.8.0: + resolution: {integrity: sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w==} stoppable@1.1.0: resolution: {integrity: sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==} @@ -8795,12 +8850,13 @@ packages: string.prototype.repeat@1.0.0: resolution: {integrity: sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==} - string.prototype.trim@1.2.9: - resolution: {integrity: sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==} + string.prototype.trim@1.2.10: + resolution: {integrity: 
sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==} engines: {node: '>= 0.4'} - string.prototype.trimend@1.0.8: - resolution: {integrity: sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==} + string.prototype.trimend@1.0.9: + resolution: {integrity: sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==} + engines: {node: '>= 0.4'} string.prototype.trimstart@1.0.8: resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==} @@ -8848,8 +8904,8 @@ packages: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} - strip-literal@2.1.0: - resolution: {integrity: sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==} + strip-literal@2.1.1: + resolution: {integrity: sha512-631UJ6O00eNGfMiWG78ck80dfBab8X6IVFB51jZK5Icd7XAs60Z5y7QdSd/wGIklnWvRbUNloVzhOKKmutxQ6Q==} strnum@1.0.5: resolution: {integrity: sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA==} @@ -8915,10 +8971,6 @@ packages: swap-case@2.0.2: resolution: {integrity: sha512-kc6S2YS/2yXbtkSMunBtKdah4VFETZ8Oh6ONSmSd9bRxhqTrtARUCBUiWXH3xVPpvR7tz2CSnkuXVE42EcGnMw==} - symbol-observable@4.0.0: - resolution: {integrity: sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ==} - engines: {node: '>=0.10'} - symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} @@ -8929,8 +8981,8 @@ packages: tabbable@6.2.0: resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} - tailwindcss@3.4.15: - resolution: {integrity: 
sha512-r4MeXnfBmSOuKUWmXe6h2CcyfzJCEk4F0pptO5jlnYSIViUkVmsawj80N5h2lO3gwcmSb4n3PuN+e+GC1Guylw==} + tailwindcss@3.4.16: + resolution: {integrity: sha512-TI4Cyx7gDiZ6r44ewaJmt0o6BrMCT5aK5e0rmJ/G9Xq3w7CX/5VXl/zIPEJZFUK5VEqwByyhqNPycPlvcK4ZNw==} engines: {node: '>=14.0.0'} hasBin: true @@ -8996,8 +9048,8 @@ packages: tinyexec@0.3.1: resolution: {integrity: sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==} - tinyglobby@0.2.9: - resolution: {integrity: sha512-8or1+BGEdk1Zkkw2ii16qSS7uVrQJPre5A9o/XkWPATkk23FZh/15BKFxPnlTy6vkljZxLqYCzzBMj30ZrSvjw==} + tinyglobby@0.2.10: + resolution: {integrity: sha512-Zc+8eJlFMvgatPZTl6A9L/yht8QqdmUNtURHaKZLmKBE12hNPSrqNkUp2cs3M/UKmNVVAMFQYSjYIVHDjW5zew==} engines: {node: '>=12.0.0'} tinypool@0.8.4: @@ -9067,8 +9119,8 @@ packages: trough@2.2.0: resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} - ts-api-utils@1.3.0: - resolution: {integrity: sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==} + ts-api-utils@1.4.3: + resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==} engines: {node: '>=16'} peerDependencies: typescript: '>=4.2.0' @@ -9083,10 +9135,6 @@ packages: ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} - ts-invariant@0.10.3: - resolution: {integrity: sha512-uivwYcQaxAucv1CzRp2n/QdYPo4ILf9VXgH19zEIjFx2EJufV16P0JtJVpYHy89DItG6Kwj2oIUjrcK5au+4tQ==} - engines: {node: '>=8'} - ts-morph@23.0.0: resolution: {integrity: sha512-FcvFx7a9E8TUe6T3ShihXJLiJOiqyafzFKUO4aqIHDUCIvADdGNShcbc2W5PMr3LerXRv7mafvFZ9lRENxJmug==} @@ -9123,8 +9171,8 @@ packages: tslib@2.6.3: resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} - tslib@2.8.0: - resolution: 
{integrity: sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==} + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} tsort@0.0.1: resolution: {integrity: sha512-Tyrf5mxF8Ofs1tNoxA13lFeZ2Zrbd6cKbuH3V+MQ5sb6DtBj5FjrXVsRWT8YvNAQTqNoz66dz1WsbigI22aEnw==} @@ -9233,33 +9281,33 @@ packages: resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} engines: {node: '>= 0.4'} - typed-array-byte-offset@1.0.2: - resolution: {integrity: sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==} + typed-array-byte-offset@1.0.3: + resolution: {integrity: sha512-GsvTyUHTriq6o/bHcTd0vM7OQ9JEdlvluu9YISaA7+KzDzPaIzEeDFNkTfhdE3MYcNhNi0vq/LlegYgIs5yPAw==} engines: {node: '>= 0.4'} - typed-array-length@1.0.6: - resolution: {integrity: sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==} + typed-array-length@1.0.7: + resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==} engines: {node: '>= 0.4'} typedarray@0.0.6: resolution: {integrity: sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==} - typescript@5.4.2: - resolution: {integrity: sha512-+2/g0Fds1ERlP6JsakQQDXjZdZMM+rqpamFZJEKh4kwTIn3iDkgKtby0CeNd5ATNZ4Ry1ax15TMx0W2V+miizQ==} + typescript@5.5.4: + resolution: {integrity: sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==} engines: {node: '>=14.17'} hasBin: true - typescript@5.6.3: - resolution: {integrity: sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==} + typescript@5.7.2: + resolution: {integrity: sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==} engines: {node: 
'>=14.17'} hasBin: true typy@3.3.0: resolution: {integrity: sha512-Du53deMF9X9pSM3gVXDjLBq14BUfZWSGKfmmR1kTlg953RaIZehfc8fQuoAiW+SRO6bJsP+59mv1tsH8vwKghg==} - uWebSockets.js@https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/442087c0a01bf146acb7386910739ec81df06700: - resolution: {tarball: https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/442087c0a01bf146acb7386910739ec81df06700} - version: 20.49.0 + uWebSockets.js@https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/6609a88ffa9a16ac5158046761356ce03250a0df: + resolution: {tarball: https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/6609a88ffa9a16ac5158046761356ce03250a0df} + version: 20.51.0 ua-parser-js@1.0.39: resolution: {integrity: sha512-k24RCVWlEcjkdOxYmVJgeD/0a1TiSpqLg+ZalVGV9lsnr4yqu0w7tX/x2xX6G4zpkgQnRf89lxuZ1wsbjXM8lw==} @@ -9281,15 +9329,15 @@ packages: undici-types@5.26.5: resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - undici-types@6.19.8: - resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} + undici-types@6.20.0: + resolution: {integrity: sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==} undici@5.28.4: resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==} engines: {node: '>=14.0'} - unenv-nightly@2.0.0-20241024-111401-d4156ac: - resolution: {integrity: sha512-xJO1hfY+Te+/XnfCYrCbFbRcgu6XEODND1s5wnVbaBCkuQX7JXF7fHEXPrukFE2j8EOH848P8QN19VO47XN8hw==} + unenv-nightly@2.0.0-20241204-140205-a5d5190: + resolution: {integrity: sha512-jpmAytLeiiW01pl5bhVn9wYJ4vtiLdhGe10oXlJBuQEX8mxjxO8BlEXGHU4vr4yEikjFP1wsomTHt/CLU8kUwg==} unicode-trie@2.0.0: resolution: {integrity: sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ==} @@ -9374,8 +9422,8 @@ packages: 
unist-util-visit@5.0.0: resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==} - universal-cookie@7.2.1: - resolution: {integrity: sha512-GEKneQ0sz8qbobkYM5s9elAx6l7GQDNVl3Siqmc7bt/YccyyXWDPn+fht3J1qMcaLwPrzkty3i+dNfPGP2/9hA==} + universal-cookie@7.2.2: + resolution: {integrity: sha512-fMiOcS3TmzP2x5QV26pIH3mvhexLIT0HmPa3V7Q7knRfT9HG6kTwq02HZGLPw0sAOXrAmotElGRvTLCMbJsvxQ==} universalify@0.1.2: resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} @@ -9428,11 +9476,11 @@ packages: '@types/react': optional: true - use-isomorphic-layout-effect@1.1.2: - resolution: {integrity: sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==} + use-isomorphic-layout-effect@1.2.0: + resolution: {integrity: sha512-q6ayo8DWoPZT0VdG4u3D3uxcgONP3Mevx2i2b0434cwWBoL+aelL1DzkXI6w3PhTZzUeR2kaVlZn70iCiseP6w==} peerDependencies: '@types/react': '*' - react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 peerDependenciesMeta: '@types/react': optional: true @@ -9447,10 +9495,10 @@ packages: '@types/react': optional: true - use-sync-external-store@1.2.2: - resolution: {integrity: sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==} + use-sync-external-store@1.4.0: + resolution: {integrity: sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==} peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} @@ -9522,8 +9570,8 @@ packages: engines: {node: ^18.0.0 || >=20.0.0} hasBin: true - vite@5.4.9: - resolution: {integrity: sha512-20OVpJHh0PAM0oSOELa5GaZNWeDjcAvQjGXy2Uyr+Tp+/D2/Hdz6NLgpJLsarPTA2QJ6v8mX2P1ZfbsSKvdMkg==} + 
vite@5.4.11: + resolution: {integrity: sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -9618,8 +9666,8 @@ packages: resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==} engines: {node: '>=18'} - whatwg-url@14.0.0: - resolution: {integrity: sha512-1lfMEm2IEr7RIV+f4lUNPOqfFL+pO+Xw3fJSqmjX9AbXcXcYOkCe1P6+9VBZB6n94af16NfZf+sSk0JCBZC9aw==} + whatwg-url@14.1.0: + resolution: {integrity: sha512-jlf/foYIKywAt3x/XWKZ/3rz8OSJPiWktjmk891alJUEjiVxKX9LEO92qH3hv4aJ0mN3MWPvGMCy8jQi95xK4w==} engines: {node: '>=18'} whatwg-url@5.0.0: @@ -9628,11 +9676,12 @@ packages: whatwg-url@7.1.0: resolution: {integrity: sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==} - which-boxed-primitive@1.0.2: - resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} + which-boxed-primitive@1.1.0: + resolution: {integrity: sha512-Ei7Miu/AXe2JJ4iNF5j/UphAgRoma4trE6PtisM09bPygb3egMH3YLW/befsWb1A1AxvNSFidOFTB18XtnIIng==} + engines: {node: '>= 0.4'} - which-builtin-type@1.1.4: - resolution: {integrity: sha512-bppkmBSsHFmIMSl8BO9TbsyzsvGjVoppt8xUiGzwiu/bhDCGxnpOKCxgqj6GuyHE0mINMDecBFPlOm2hzY084w==} + which-builtin-type@1.2.1: + resolution: {integrity: sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==} engines: {node: '>= 0.4'} which-collection@1.0.2: @@ -9642,8 +9691,8 @@ packages: which-module@2.0.1: resolution: {integrity: sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==} - which-typed-array@1.1.15: - resolution: {integrity: sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==} + which-typed-array@1.1.16: + resolution: {integrity: 
sha512-g+N+GAWiRj66DngFwHvISJd+ITsyphZvD1vChfVg6cEdnzy53GzB3oy0fUNlvhz7H7+MiqhYr26qxQShCpKTTQ==} engines: {node: '>= 0.4'} which@1.3.1: @@ -9682,20 +9731,20 @@ packages: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} - workerd@1.20241106.1: - resolution: {integrity: sha512-1GdKl0kDw8rrirr/ThcK66Kbl4/jd4h8uHx5g7YHBrnenY5SX1UPuop2cnCzYUxlg55kPjzIqqYslz1muRFgFw==} + workerd@1.20241205.0: + resolution: {integrity: sha512-vso/2n0c5SdBDWiD+Sx5gM7unA6SiZXRVUHDqH1euoP/9mFVHZF8icoYsNLB87b/TX8zNgpae+I5N/xFpd9v0g==} engines: {node: '>=16'} hasBin: true workerpool@6.5.1: resolution: {integrity: sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==} - wrangler@3.87.0: - resolution: {integrity: sha512-BExktnSLeGgG+uxgnr4h9eZ5nefdpTVcTHR+gEIWRvqk07XL04nJwpPYAOIPKPpB7E2tMdDJgNLGQN/CY6e1xQ==} + wrangler@3.95.0: + resolution: {integrity: sha512-3w5852i3FNyDz421K2Qk4v5L8jjwegO5O8E1+VAQmjnm82HFNxpIRUBq0bmM7CTLvOPI/Jjcmj/eAWjQBL7QYg==} engines: {node: '>=16.17.0'} hasBin: true peerDependencies: - '@cloudflare/workers-types': ^4.20241106.0 + '@cloudflare/workers-types': ^4.20241205.0 peerDependenciesMeta: '@cloudflare/workers-types': optional: true @@ -9765,8 +9814,8 @@ packages: resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} engines: {node: '>=0.4'} - xxhash-wasm@1.0.2: - resolution: {integrity: sha512-ibF0Or+FivM9lNrg+HGJfVX8WJqgo+kCLDc4vx6xMeTce7Aj+DLttKbxxRR/gNLSAelRc1omAPlJ77N/Jem07A==} + xxhash-wasm@1.1.0: + resolution: {integrity: sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA==} y18n@4.0.3: resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} @@ -9788,8 +9837,8 @@ packages: resolution: {integrity: 
sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} - yaml@2.6.0: - resolution: {integrity: sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==} + yaml@2.6.1: + resolution: {integrity: sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg==} engines: {node: '>= 14'} hasBin: true @@ -9839,164 +9888,133 @@ packages: youch@3.3.4: resolution: {integrity: sha512-UeVBXie8cA35DS6+nBkls68xaBBXCye0CNznrhszZjTbRVnJKQuNsyLKBTTL4ln1o1rh2PKtv35twV7irj5SEg==} - zen-observable-ts@1.2.5: - resolution: {integrity: sha512-QZWQekv6iB72Naeake9hS1KxHlotfRpe+WGNbNx5/ta+R3DNjVO2bswf63gXlWDcs+EMd7XY8HfVQyP1X6T4Zg==} - - zen-observable@0.8.15: - resolution: {integrity: sha512-PQ2PC7R9rslx84ndNBZB/Dkv8V8fZEpk83RLgXtYd0fwUgEjseMn1Dgajh2x6S8QbZAFa9p2qVCEuYZNgve0dQ==} - zod-validation-error@3.4.0: resolution: {integrity: sha512-ZOPR9SVY6Pb2qqO5XHt+MkkTRxGXb4EVtnjc9JpXUOtUB1T9Ru7mZOT361AN3MsetVe7R0a1KZshJDZdgp9miQ==} engines: {node: '>=18.0.0'} peerDependencies: zod: ^3.18.0 - zod@3.23.8: - resolution: {integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==} + zod@3.24.1: + resolution: {integrity: sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A==} zwitch@2.0.4: resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} snapshots: - '@algolia/autocomplete-core@1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)(search-insights@2.17.2)': + '@algolia/autocomplete-core@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-plugin-algolia-insights': 1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)(search-insights@2.17.2) - '@algolia/autocomplete-shared': 
1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2) + '@algolia/autocomplete-plugin-algolia-insights': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3) + '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) transitivePeerDependencies: - '@algolia/client-search' - algoliasearch - search-insights - '@algolia/autocomplete-plugin-algolia-insights@1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)(search-insights@2.17.2)': + '@algolia/autocomplete-plugin-algolia-insights@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2) - search-insights: 2.17.2 + '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) + search-insights: 2.17.3 transitivePeerDependencies: - '@algolia/client-search' - algoliasearch - '@algolia/autocomplete-preset-algolia@1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)': + '@algolia/autocomplete-preset-algolia@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)': dependencies: - '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2) - '@algolia/client-search': 5.14.2 - algoliasearch: 5.14.2 + '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) + '@algolia/client-search': 5.17.1 + algoliasearch: 5.17.1 - '@algolia/autocomplete-shared@1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)': + '@algolia/autocomplete-shared@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)': dependencies: - '@algolia/client-search': 5.14.2 - algoliasearch: 5.14.2 + '@algolia/client-search': 5.17.1 + algoliasearch: 5.17.1 - '@algolia/client-abtesting@5.14.2': + '@algolia/client-abtesting@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - 
'@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/client-analytics@5.14.2': + '@algolia/client-analytics@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/client-common@5.14.2': {} + '@algolia/client-common@5.17.1': {} - '@algolia/client-insights@5.14.2': + '@algolia/client-insights@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/client-personalization@5.14.2': + '@algolia/client-personalization@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/client-query-suggestions@5.14.2': + '@algolia/client-query-suggestions@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/client-search@5.14.2': + 
'@algolia/client-search@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/ingestion@1.14.2': + '@algolia/ingestion@1.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/monitoring@1.14.2': + '@algolia/monitoring@1.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/recommend@5.14.2': + '@algolia/recommend@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + '@algolia/client-common': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 - '@algolia/requester-browser-xhr@5.14.2': + '@algolia/requester-browser-xhr@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 + '@algolia/client-common': 5.17.1 - '@algolia/requester-fetch@5.14.2': + '@algolia/requester-fetch@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 + '@algolia/client-common': 5.17.1 - '@algolia/requester-node-http@5.14.2': + '@algolia/requester-node-http@5.17.1': dependencies: - '@algolia/client-common': 5.14.2 + 
'@algolia/client-common': 5.17.1 '@alloc/quick-lru@5.2.0': {} '@ampproject/remapping@2.3.0': dependencies: - '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/gen-mapping': 0.3.8 '@jridgewell/trace-mapping': 0.3.25 - '@apollo/client@3.11.8(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) - '@wry/caches': 1.0.1 - '@wry/equality': 0.5.7 - '@wry/trie': 0.5.0 - graphql: 16.9.0 - graphql-tag: 2.12.6(graphql@16.9.0) - hoist-non-react-statics: 3.3.2 - optimism: 0.18.0 - prop-types: 15.8.1 - rehackt: 0.1.0(@types/react@18.3.12)(react@18.3.1) - response-iterator: 0.2.6 - symbol-observable: 4.0.0 - ts-invariant: 0.10.3 - tslib: 2.8.0 - zen-observable-ts: 1.2.5 - optionalDependencies: - graphql-ws: 5.16.0(graphql@16.9.0) - react: 18.3.1 - react-dom: 18.3.1(react@18.3.1) - transitivePeerDependencies: - - '@types/react' - optional: true - - '@arbitrum/sdk@3.7.0': + '@arbitrum/sdk@3.7.1': dependencies: '@ethersproject/address': 5.7.0 '@ethersproject/bignumber': 5.7.0 @@ -10009,13 +10027,13 @@ snapshots: '@ardatan/relay-compiler@12.0.0(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@babel/core': 7.25.9 - '@babel/generator': 7.25.9 - '@babel/parser': 7.25.9 - '@babel/runtime': 7.25.9 - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 - babel-preset-fbjs: 3.4.0(@babel/core@7.25.9) + '@babel/core': 7.26.0 + '@babel/generator': 7.26.3 + '@babel/parser': 7.26.3 + '@babel/runtime': 7.26.0 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.3 + babel-preset-fbjs: 3.4.0(@babel/core@7.26.0) chalk: 4.1.2 fb-watchman: 2.0.2 fbjs: 3.0.5(encoding@0.1.13) @@ -10037,115 +10055,117 @@ snapshots: transitivePeerDependencies: - encoding - '@babel/code-frame@7.25.9': + '@babel/code-frame@7.26.2': dependencies: - '@babel/highlight': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 picocolors: 1.1.1 - 
'@babel/compat-data@7.25.9': {} + '@babel/compat-data@7.26.3': {} - '@babel/core@7.25.9': + '@babel/core@7.26.0': dependencies: '@ampproject/remapping': 2.3.0 - '@babel/code-frame': 7.25.9 - '@babel/generator': 7.25.9 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.2 '@babel/helper-compilation-targets': 7.25.9 - '@babel/helper-module-transforms': 7.25.9(@babel/core@7.25.9) - '@babel/helpers': 7.25.9 - '@babel/parser': 7.25.9 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helpers': 7.26.0 + '@babel/parser': 7.26.0 '@babel/template': 7.25.9 - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.0 convert-source-map: 2.0.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 transitivePeerDependencies: - supports-color - '@babel/generator@7.25.9': + '@babel/generator@7.26.2': + dependencies: + '@babel/parser': 7.26.3 + '@babel/types': 7.26.0 + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.1.0 + + '@babel/generator@7.26.3': dependencies: - '@babel/types': 7.25.9 - '@jridgewell/gen-mapping': 0.3.5 + '@babel/parser': 7.26.3 + '@babel/types': 7.26.3 + '@jridgewell/gen-mapping': 0.3.8 '@jridgewell/trace-mapping': 0.3.25 - jsesc: 3.0.2 + jsesc: 3.1.0 '@babel/helper-annotate-as-pure@7.25.9': dependencies: - '@babel/types': 7.25.9 + '@babel/types': 7.26.3 '@babel/helper-compilation-targets@7.25.9': dependencies: - '@babel/compat-data': 7.25.9 + '@babel/compat-data': 7.26.3 '@babel/helper-validator-option': 7.25.9 - browserslist: 4.24.2 + browserslist: 4.24.3 lru-cache: 5.1.1 semver: 6.3.1 - '@babel/helper-create-class-features-plugin@7.25.9(@babel/core@7.25.9)': + '@babel/helper-create-class-features-plugin@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-annotate-as-pure': 7.25.9 '@babel/helper-member-expression-to-functions': 
7.25.9 '@babel/helper-optimise-call-expression': 7.25.9 - '@babel/helper-replace-supers': 7.25.9(@babel/core@7.25.9) + '@babel/helper-replace-supers': 7.25.9(@babel/core@7.26.0) '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 - '@babel/traverse': 7.25.9 + '@babel/traverse': 7.26.4 semver: 6.3.1 transitivePeerDependencies: - supports-color '@babel/helper-member-expression-to-functions@7.25.9': dependencies: - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.3 transitivePeerDependencies: - supports-color '@babel/helper-module-imports@7.25.9': dependencies: - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.3 transitivePeerDependencies: - supports-color - '@babel/helper-module-transforms@7.25.9(@babel/core@7.25.9)': + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-module-imports': 7.25.9 - '@babel/helper-simple-access': 7.25.9 '@babel/helper-validator-identifier': 7.25.9 - '@babel/traverse': 7.25.9 + '@babel/traverse': 7.26.4 transitivePeerDependencies: - supports-color '@babel/helper-optimise-call-expression@7.25.9': dependencies: - '@babel/types': 7.25.9 + '@babel/types': 7.26.3 '@babel/helper-plugin-utils@7.25.9': {} - '@babel/helper-replace-supers@7.25.9(@babel/core@7.25.9)': + '@babel/helper-replace-supers@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-member-expression-to-functions': 7.25.9 '@babel/helper-optimise-call-expression': 7.25.9 - '@babel/traverse': 7.25.9 - transitivePeerDependencies: - - supports-color - - '@babel/helper-simple-access@7.25.9': - dependencies: - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 + '@babel/traverse': 7.26.4 transitivePeerDependencies: - supports-color '@babel/helper-skip-transparent-expression-wrappers@7.25.9': dependencies: - '@babel/traverse': 7.25.9 - 
'@babel/types': 7.25.9 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.3 transitivePeerDependencies: - supports-color @@ -10155,219 +10175,220 @@ snapshots: '@babel/helper-validator-option@7.25.9': {} - '@babel/helpers@7.25.9': + '@babel/helpers@7.26.0': dependencies: '@babel/template': 7.25.9 - '@babel/types': 7.25.9 + '@babel/types': 7.26.0 - '@babel/highlight@7.25.9': + '@babel/parser@7.26.0': dependencies: - '@babel/helper-validator-identifier': 7.25.9 - chalk: 2.4.2 - js-tokens: 4.0.0 - picocolors: 1.1.1 + '@babel/types': 7.26.0 - '@babel/parser@7.25.9': + '@babel/parser@7.26.3': dependencies: - '@babel/types': 7.25.9 + '@babel/types': 7.26.3 - '@babel/plugin-proposal-class-properties@7.18.6(@babel/core@7.25.9)': + '@babel/plugin-proposal-class-properties@7.18.6(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 - '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.25.9) + '@babel/core': 7.26.0 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) '@babel/helper-plugin-utils': 7.25.9 transitivePeerDependencies: - supports-color - '@babel/plugin-proposal-object-rest-spread@7.20.7(@babel/core@7.25.9)': + '@babel/plugin-proposal-object-rest-spread@7.20.7(@babel/core@7.26.0)': dependencies: - '@babel/compat-data': 7.25.9 - '@babel/core': 7.25.9 + '@babel/compat-data': 7.26.3 + '@babel/core': 7.26.0 '@babel/helper-compilation-targets': 7.25.9 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.25.9) - '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.25.9) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.26.0) + '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.26.0) - '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.25.9)': + '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - 
'@babel/plugin-syntax-flow@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-syntax-flow@7.26.0(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-import-assertions@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-syntax-import-assertions@7.26.0(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.25.9)': + '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-arrow-functions@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-arrow-functions@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-block-scoped-functions@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-block-scoped-functions@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-block-scoping@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-block-scoping@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-classes@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-classes@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-annotate-as-pure': 7.25.9 '@babel/helper-compilation-targets': 7.25.9 '@babel/helper-plugin-utils': 7.25.9 - '@babel/helper-replace-supers': 
7.25.9(@babel/core@7.25.9) - '@babel/traverse': 7.25.9 + '@babel/helper-replace-supers': 7.25.9(@babel/core@7.26.0) + '@babel/traverse': 7.26.4 globals: 11.12.0 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-computed-properties@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-computed-properties@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 '@babel/template': 7.25.9 - '@babel/plugin-transform-destructuring@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-destructuring@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-flow-strip-types@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-flow-strip-types@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-flow': 7.25.9(@babel/core@7.25.9) + '@babel/plugin-syntax-flow': 7.26.0(@babel/core@7.26.0) - '@babel/plugin-transform-for-of@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-for-of@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-function-name@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-function-name@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-compilation-targets': 7.25.9 '@babel/helper-plugin-utils': 7.25.9 - '@babel/traverse': 7.25.9 + '@babel/traverse': 7.26.4 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-literals@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-literals@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 
'@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-member-expression-literals@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-member-expression-literals@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-modules-commonjs@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-modules-commonjs@7.26.3(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 - '@babel/helper-module-transforms': 7.25.9(@babel/core@7.25.9) + '@babel/core': 7.26.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) '@babel/helper-plugin-utils': 7.25.9 - '@babel/helper-simple-access': 7.25.9 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-object-super@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-object-super@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/helper-replace-supers': 7.25.9(@babel/core@7.25.9) + '@babel/helper-replace-supers': 7.25.9(@babel/core@7.26.0) transitivePeerDependencies: - supports-color - '@babel/plugin-transform-parameters@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-parameters@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-property-literals@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-property-literals@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-react-display-name@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-react-display-name@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-react-jsx@7.25.9(@babel/core@7.25.9)': + 
'@babel/plugin-transform-react-jsx@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-annotate-as-pure': 7.25.9 '@babel/helper-module-imports': 7.25.9 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.25.9) - '@babel/types': 7.25.9 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/types': 7.26.3 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-shorthand-properties@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-shorthand-properties@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/plugin-transform-spread@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-spread@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 transitivePeerDependencies: - supports-color - '@babel/plugin-transform-template-literals@7.25.9(@babel/core@7.25.9)': + '@babel/plugin-transform-template-literals@7.25.9(@babel/core@7.26.0)': dependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 '@babel/helper-plugin-utils': 7.25.9 - '@babel/runtime@7.25.9': + '@babel/runtime@7.26.0': dependencies: regenerator-runtime: 0.14.1 '@babel/template@7.25.9': dependencies: - '@babel/code-frame': 7.25.9 - '@babel/parser': 7.25.9 - '@babel/types': 7.25.9 + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.26.0 + '@babel/types': 7.26.0 - '@babel/traverse@7.25.9': + '@babel/traverse@7.26.4': dependencies: - '@babel/code-frame': 7.25.9 - '@babel/generator': 7.25.9 - '@babel/parser': 7.25.9 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.3 + '@babel/parser': 7.26.3 '@babel/template': 7.25.9 - '@babel/types': 7.25.9 - debug: 4.3.7(supports-color@8.1.1) + '@babel/types': 7.26.3 + debug: 4.4.0(supports-color@8.1.1) globals: 11.12.0 
transitivePeerDependencies: - supports-color - '@babel/types@7.25.9': + '@babel/types@7.26.0': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@babel/types@7.26.3': dependencies: '@babel/helper-string-parser': 7.25.9 '@babel/helper-validator-identifier': 7.25.9 @@ -10399,27 +10420,27 @@ snapshots: dependencies: mime: 3.0.0 - '@cloudflare/workerd-darwin-64@1.20241106.1': + '@cloudflare/workerd-darwin-64@1.20241205.0': optional: true - '@cloudflare/workerd-darwin-arm64@1.20241106.1': + '@cloudflare/workerd-darwin-arm64@1.20241205.0': optional: true - '@cloudflare/workerd-linux-64@1.20241106.1': + '@cloudflare/workerd-linux-64@1.20241205.0': optional: true - '@cloudflare/workerd-linux-arm64@1.20241106.1': + '@cloudflare/workerd-linux-arm64@1.20241205.0': optional: true - '@cloudflare/workerd-windows-64@1.20241106.1': + '@cloudflare/workerd-windows-64@1.20241205.0': optional: true - '@cloudflare/workers-shared@0.7.1': + '@cloudflare/workers-shared@0.11.0': dependencies: mime: 3.0.0 - zod: 3.23.8 + zod: 3.24.1 - '@cloudflare/workers-types@4.20241112.0': {} + '@cloudflare/workers-types@4.20241205.0': {} '@corex/deepmerge@4.0.43': {} @@ -10429,27 +10450,28 @@ snapshots: '@docsearch/css@3.8.0': {} - '@docsearch/react@3.8.0(@algolia/client-search@5.14.2)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.2)': + '@docsearch/react@3.8.0(@algolia/client-search@5.17.1)(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-core': 1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2)(search-insights@2.17.2) - '@algolia/autocomplete-preset-algolia': 1.17.7(@algolia/client-search@5.14.2)(algoliasearch@5.14.2) + '@algolia/autocomplete-core': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3) + '@algolia/autocomplete-preset-algolia': 
1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) '@docsearch/css': 3.8.0 - algoliasearch: 5.14.2 + algoliasearch: 5.17.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - search-insights: 2.17.2 + search-insights: 2.17.3 transitivePeerDependencies: - '@algolia/client-search' - '@edgeandnode/common@6.30.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))': + '@edgeandnode/common@6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: '@ethersproject/providers': 5.7.2 + '@pinax/graph-networks-registry': 0.6.5 '@uniswap/sdk-core': 5.9.0 '@uniswap/v3-core': 1.0.1 - '@uniswap/v3-sdk': 3.18.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - dataloader: 2.2.2 + '@uniswap/v3-sdk': 3.19.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + dataloader: 2.2.3 dayjs: 1.11.13 decimal.js: 10.4.3 ethers: 5.7.2 @@ -10462,87 +10484,86 @@ snapshots: - hardhat - utf-8-validate - '@edgeandnode/eslint-config@2.0.3(eslint@8.57.1)(typescript@5.6.3)': + '@edgeandnode/eslint-config@2.0.3(eslint@8.57.1)(typescript@5.7.2)': dependencies: '@hasparus/eslint-plugin': 1.0.0 '@next/eslint-plugin-next': 13.4.9 '@rushstack/eslint-patch': 1.10.4 - '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3) - '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint@8.57.1)(typescript@5.7.2) + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.7.2) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 
3.6.3(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1) - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) - eslint-plugin-jsx-a11y: 6.10.1(eslint@8.57.1) + eslint-import-resolver-typescript: 3.7.0(eslint-plugin-import@2.31.0)(eslint@8.57.1) + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint-import-resolver-typescript@3.7.0)(eslint@8.57.1) + eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) eslint-plugin-react: 7.37.2(eslint@8.57.1) eslint-plugin-react-hooks: 4.6.2(eslint@8.57.1) eslint-plugin-simple-import-sort: 10.0.0(eslint@8.57.1) eslint-plugin-sonarjs: 0.19.0(eslint@8.57.1) optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - eslint-import-resolver-webpack - eslint-plugin-import-x - supports-color - '@edgeandnode/gds@5.34.0(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@types/react-dom@18.3.1)(@types/react@18.3.12)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)': + 
'@edgeandnode/gds@5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)': dependencies: - '@edgeandnode/common': 6.30.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@figma/code-connect': 1.2.0 + '@edgeandnode/common': 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@figma/code-connect': 1.2.4 '@floating-ui/react-dom': 2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@headlessui/react': 2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@headlessui/react': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@phosphor-icons/react': 2.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-accordion': 1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-alert-dialog': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-dialog': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dropdown-menu': 2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-label': 
2.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-popover': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slider': 1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-switch': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-toast': 1.2.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-tooltip': 1.1.3(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@sindresorhus/slugify': 2.2.1 - '@tailwindcss/container-queries': 0.1.1(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))) - '@tanem/react-nprogress': 5.0.52(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) - '@theme-ui/match-media': 0.17.0(@theme-ui/core@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(@theme-ui/css@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)))(react@18.3.1) - '@web3icons/react': 3.10.1(react@18.3.1)(typescript@5.6.3) - '@xstate/react': 3.2.2(@types/react@18.3.12)(react@18.3.1)(xstate@4.38.3) + '@radix-ui/react-accordion': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-alert-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': 
1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dropdown-menu': 2.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-label': 2.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-popover': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slider': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-switch': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-toast': 1.2.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-tooltip': 1.1.5(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@tailwindcss/container-queries': 0.1.1(tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))) + '@tanem/react-nprogress': 5.0.53(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) + '@theme-ui/match-media': 0.17.1(@theme-ui/core@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(@theme-ui/css@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)))(react@18.3.1) + '@web3icons/react': 
3.13.2(react@18.3.1)(typescript@5.7.2) + '@xstate/react': 3.2.2(@types/react@18.3.16)(react@18.3.1)(xstate@4.38.3) classnames: 2.5.1 color: 4.2.3 dayjs: 1.11.13 escape-string-regexp: 5.0.0 ethers: 5.7.2 - framer-motion: 11.11.9(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) lodash: 4.17.21 md5: 2.3.0 + motion: 11.14.4(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) numeral: 2.0.6 - prism-react-renderer: 2.4.0(react@18.3.1) + prism-react-renderer: 2.4.1(react@18.3.1) prismjs: 1.29.0 react: 18.3.1 - react-aria: 3.35.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react-aria: 3.36.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react-device-detect: 2.2.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react-dom: 18.3.1(react@18.3.1) react-dropzone: 14.2.3(react@18.3.1) react-keyed-flatten-children: 3.0.2(react@18.3.1) react-transition-group: 4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - react-use: 17.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - react-virtuoso: 4.12.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - recharts: 2.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - shiki: 1.22.0 - tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)) - theme-ui: 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + react-use: 17.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react-virtuoso: 4.12.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + recharts: 2.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + shiki: 1.24.2 + tailwindcss: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + theme-ui: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) typy: 3.3.0 - universal-cookie: 7.2.1 + universal-cookie: 7.2.2 xstate: 4.38.3 optionalDependencies: - next: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) 
transitivePeerDependencies: - '@emotion/is-prop-valid' - '@types/react' @@ -10550,46 +10571,44 @@ snapshots: - '@xstate/fsm' - bufferutil - canvas - - debug - hardhat - supports-color - ts-node - typescript - utf-8-validate - '@edgeandnode/go@6.60.0(as4xc7bxwgf6xy75as5gbynhpq)': - dependencies: - '@edgeandnode/common': 6.30.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - '@edgeandnode/gds': 5.34.0(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@types/react-dom@18.3.1)(@types/react@18.3.12)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-collapsible': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-dialog': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-navigation-menu': 1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@sindresorhus/slugify': 2.2.1 - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + '@edgeandnode/go@6.74.0(pvvz7w46g6d2da5mez3pi63ihq)': + dependencies: + '@edgeandnode/common': 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + '@edgeandnode/gds': 
5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-collapsible': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-navigation-menu': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) classnames: 2.5.1 escape-string-regexp: 5.0.0 - framer-motion: 11.11.9(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + motion: 11.14.4(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)) - theme-ui: 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + tailwindcss: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + theme-ui: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) optionalDependencies: - next: 
14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) transitivePeerDependencies: - '@emotion/is-prop-valid' - '@types/react' - '@types/react-dom' - ts-node - '@emotion/babel-plugin@11.12.0': + '@emotion/babel-plugin@11.13.5': dependencies: '@babel/helper-module-imports': 7.25.9 - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 '@emotion/hash': 0.9.2 '@emotion/memoize': 0.9.0 - '@emotion/serialize': 1.3.2 + '@emotion/serialize': 1.3.3 babel-plugin-macros: 3.1.0 convert-source-map: 1.9.0 escape-string-regexp: 4.0.0 @@ -10599,11 +10618,11 @@ snapshots: transitivePeerDependencies: - supports-color - '@emotion/cache@11.13.1': + '@emotion/cache@11.14.0': dependencies: '@emotion/memoize': 0.9.0 '@emotion/sheet': 1.4.0 - '@emotion/utils': 1.4.1 + '@emotion/utils': 1.4.2 '@emotion/weak-memoize': 0.4.0 stylis: 4.2.0 @@ -10619,65 +10638,65 @@ snapshots: '@emotion/memoize@0.9.0': {} - '@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)': + '@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@babel/runtime': 7.25.9 - '@emotion/babel-plugin': 11.12.0 - '@emotion/cache': 11.13.1 - '@emotion/serialize': 1.3.2 - '@emotion/use-insertion-effect-with-fallbacks': 1.1.0(react@18.3.1) - '@emotion/utils': 1.4.1 + '@babel/runtime': 7.26.0 + '@emotion/babel-plugin': 11.13.5 + '@emotion/cache': 11.14.0 + '@emotion/serialize': 1.3.3 + '@emotion/use-insertion-effect-with-fallbacks': 1.2.0(react@18.3.1) + '@emotion/utils': 1.4.2 '@emotion/weak-memoize': 0.4.0 hoist-non-react-statics: 3.3.2 react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 transitivePeerDependencies: - supports-color - '@emotion/serialize@1.3.2': + '@emotion/serialize@1.3.3': dependencies: '@emotion/hash': 0.9.2 '@emotion/memoize': 0.9.0 '@emotion/unitless': 0.10.0 - '@emotion/utils': 1.4.1 + '@emotion/utils': 1.4.2 csstype: 3.1.3 
'@emotion/sheet@1.4.0': {} '@emotion/unitless@0.10.0': {} - '@emotion/use-insertion-effect-with-fallbacks@1.1.0(react@18.3.1)': + '@emotion/use-insertion-effect-with-fallbacks@1.2.0(react@18.3.1)': dependencies: react: 18.3.1 - '@emotion/utils@1.4.1': {} + '@emotion/utils@1.4.2': {} '@emotion/weak-memoize@0.4.0': {} '@envelop/core@5.0.2': dependencies: '@envelop/types': 5.0.0 - tslib: 2.8.0 + tslib: 2.8.1 '@envelop/extended-validation@4.1.0(@envelop/core@5.0.2)(graphql@16.9.0)': dependencies: '@envelop/core': 5.0.2 - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@envelop/graphql-jit@8.0.3(@envelop/core@5.0.2)(graphql@16.9.0)': + '@envelop/graphql-jit@8.0.4(@envelop/core@5.0.2)(graphql@16.9.0)': dependencies: '@envelop/core': 5.0.2 graphql: 16.9.0 - graphql-jit: 0.8.6(graphql@16.9.0) - tslib: 2.8.0 + graphql-jit: 0.8.7(graphql@16.9.0) + tslib: 2.8.1 value-or-promise: 1.0.12 '@envelop/types@5.0.0': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 '@esbuild-plugins/node-globals-polyfill@0.2.3(esbuild@0.17.19)': dependencies: @@ -10968,17 +10987,17 @@ snapshots: '@esbuild/win32-x64@0.24.0': optional: true - '@eslint-community/eslint-utils@4.4.0(eslint@8.57.1)': + '@eslint-community/eslint-utils@4.4.1(eslint@8.57.1)': dependencies: eslint: 8.57.1 eslint-visitor-keys: 3.4.3 - '@eslint-community/regexpp@4.11.1': {} + '@eslint-community/regexpp@4.12.1': {} '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) espree: 9.6.1 globals: 13.24.0 ignore: 5.3.2 @@ -11299,20 +11318,19 @@ snapshots: dependencies: fast-deep-equal: 3.1.3 - '@figma/code-connect@1.2.0': + '@figma/code-connect@1.2.4': dependencies: - '@babel/core': 7.25.9 - '@babel/generator': 7.25.9 - '@babel/parser': 7.25.9 - '@babel/types': 7.25.9 + '@babel/core': 7.26.0 + '@babel/generator': 7.26.2 + '@babel/parser': 7.26.0 + '@babel/types': 7.26.0 
'@storybook/csf-tools': 7.6.20 - axios: 1.7.7 boxen: 5.1.1 chalk: 4.1.2 commander: 11.1.0 compare-versions: 6.1.1 - cross-spawn: 7.0.3 - dotenv: 16.4.5 + cross-spawn: 7.0.6 + dotenv: 16.4.7 fast-fuzzy: 1.12.0 find-up: 5.0.0 glob: 10.4.5 @@ -11320,18 +11338,18 @@ snapshots: lodash: 4.17.21 minimatch: 9.0.5 ora: 5.4.1 - parse5: 7.2.0 + parse5: 7.2.1 prettier: 2.8.8 prompts: 2.4.2 strip-ansi: 6.0.1 ts-morph: 23.0.0 - typescript: 5.4.2 - zod: 3.23.8 - zod-validation-error: 3.4.0(zod@3.23.8) + typescript: 5.5.4 + undici: 5.28.4 + zod: 3.24.1 + zod-validation-error: 3.4.0(zod@3.24.1) transitivePeerDependencies: - bufferutil - canvas - - debug - supports-color - utf-8-validate @@ -11339,18 +11357,18 @@ snapshots: dependencies: '@floating-ui/utils': 0.2.8 - '@floating-ui/dom@1.6.11': + '@floating-ui/dom@1.6.12': dependencies: '@floating-ui/core': 1.6.8 '@floating-ui/utils': 0.2.8 '@floating-ui/react-dom@2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@floating-ui/dom': 1.6.11 + '@floating-ui/dom': 1.6.12 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@floating-ui/react@0.26.25(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@floating-ui/react@0.26.28(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@floating-ui/react-dom': 2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@floating-ui/utils': 0.2.8 @@ -11360,88 +11378,84 @@ snapshots: '@floating-ui/utils@0.2.8': {} - '@formatjs/ecma402-abstract@2.2.0': + '@formatjs/ecma402-abstract@2.3.1': dependencies: - '@formatjs/fast-memoize': 2.2.1 - '@formatjs/intl-localematcher': 0.5.5 - tslib: 2.8.0 + '@formatjs/fast-memoize': 2.2.5 + '@formatjs/intl-localematcher': 0.5.9 + decimal.js: 10.4.3 + tslib: 2.8.1 - '@formatjs/fast-memoize@2.2.1': + '@formatjs/fast-memoize@2.2.5': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 - '@formatjs/icu-messageformat-parser@2.8.0': + '@formatjs/icu-messageformat-parser@2.9.7': dependencies: - '@formatjs/ecma402-abstract': 2.2.0 - 
'@formatjs/icu-skeleton-parser': 1.8.4 - tslib: 2.8.0 + '@formatjs/ecma402-abstract': 2.3.1 + '@formatjs/icu-skeleton-parser': 1.8.11 + tslib: 2.8.1 - '@formatjs/icu-skeleton-parser@1.8.4': + '@formatjs/icu-skeleton-parser@1.8.11': dependencies: - '@formatjs/ecma402-abstract': 2.2.0 - tslib: 2.8.0 + '@formatjs/ecma402-abstract': 2.3.1 + tslib: 2.8.1 - '@formatjs/intl-localematcher@0.5.5': + '@formatjs/intl-localematcher@0.5.9': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 - '@graphprotocol/client-add-source-name@2.0.7(@graphql-mesh/types@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(graphql@16.9.0)': + '@graphprotocol/client-add-source-name@2.0.7(@graphql-mesh/types@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(graphql@16.9.0)': dependencies: - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) graphql: 16.9.0 lodash: 4.17.21 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphprotocol/client-auto-pagination@2.0.7(@graphql-mesh/types@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(graphql@16.9.0)': + 
'@graphprotocol/client-auto-pagination@2.0.7(@graphql-mesh/types@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(graphql@16.9.0)': dependencies: - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) graphql: 16.9.0 lodash: 4.17.21 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphprotocol/client-auto-type-merging@2.0.7(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(graphql@16.9.0)': + '@graphprotocol/client-auto-type-merging@2.0.7(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(graphql@16.9.0)': dependencies: - '@graphql-mesh/transform-type-merging': 0.102.8(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) + '@graphql-mesh/transform-type-merging': 0.102.13(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - '@graphql-mesh/utils' - 
'@graphprotocol/client-block-tracking@2.0.6(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@graphprotocol/client-block-tracking@2.0.6(@graphql-mesh/store@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@types/node@22.10.2)(graphql@16.9.0)': dependencies: - '@graphql-mesh/fusion-runtime': 0.8.14(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-mesh/fusion-runtime': 0.8.14(@graphql-mesh/store@0.98.10)(@types/node@22.10.2)(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - '@graphql-mesh/store' - '@types/node' - - '@types/react' - - graphql-ws - - react - - react-dom - - subscriptions-transport-ws - - 
'@graphprotocol/client-cli@3.0.3(@envelop/core@5.0.2)(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/merge@9.0.8(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-ws@5.16.0(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@graphprotocol/client-add-source-name': 2.0.7(@graphql-mesh/types@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(graphql@16.9.0) - '@graphprotocol/client-auto-pagination': 2.0.7(@graphql-mesh/types@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@graphql-tools/wrap@10.0.10(graphql@16.9.0))(graphql@16.9.0) - '@graphprotocol/client-auto-type-merging': 2.0.7(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(graphql@16.9.0) - '@graphprotocol/client-block-tracking': 
2.0.6(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-tools/delegate@10.0.26(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@graphprotocol/client-polling-live': 2.0.1(@envelop/core@5.0.2)(@graphql-tools/merge@9.0.8(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/cli': 0.90.12(@types/node@22.7.8)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/graphql': 0.98.11(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(encoding@0.1.13)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tslib@2.8.0) + + '@graphprotocol/client-cli@3.0.3(@envelop/core@5.0.2)(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/merge@9.0.13(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(@types/node@22.10.2)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0)': + dependencies: + '@graphprotocol/client-add-source-name': 2.0.7(@graphql-mesh/types@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(graphql@16.9.0) + '@graphprotocol/client-auto-pagination': 
2.0.7(@graphql-mesh/types@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@graphql-tools/wrap@10.0.26(graphql@16.9.0))(graphql@16.9.0) + '@graphprotocol/client-auto-type-merging': 2.0.7(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(graphql@16.9.0) + '@graphprotocol/client-block-tracking': 2.0.6(@graphql-mesh/store@0.98.10)(@graphql-tools/delegate@10.2.8(graphql@16.9.0))(@types/node@22.10.2)(graphql@16.9.0) + '@graphprotocol/client-polling-live': 2.0.1(@envelop/core@5.0.2)(@graphql-tools/merge@9.0.13(graphql@16.9.0))(graphql@16.9.0) + '@graphql-mesh/cli': 0.90.12(@types/node@22.10.2)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0) + '@graphql-mesh/graphql': 0.98.11(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@types/node@22.10.2)(encoding@0.1.13)(graphql@16.9.0)(tslib@2.8.1) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - '@envelop/core' - '@graphql-mesh/cross-helpers' @@ -11455,27 +11469,22 @@ snapshots: - '@swc/core' - '@swc/wasm' - '@types/node' - - '@types/react' - bufferutil - encoding - graphql-tag - - graphql-ws - graphql-yoga - - react - - react-dom - - subscriptions-transport-ws - supports-color - utf-8-validate - '@graphprotocol/client-polling-live@2.0.1(@envelop/core@5.0.2)(@graphql-tools/merge@9.0.8(graphql@16.9.0))(graphql@16.9.0)': + '@graphprotocol/client-polling-live@2.0.1(@envelop/core@5.0.2)(@graphql-tools/merge@9.0.13(graphql@16.9.0))(graphql@16.9.0)': dependencies: '@envelop/core': 5.0.2 - '@graphql-tools/merge': 9.0.8(graphql@16.9.0) + '@graphql-tools/merge': 9.0.13(graphql@16.9.0) '@repeaterjs/repeater': 3.0.6 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphprotocol/common-ts@2.0.10(encoding@0.1.13)': + 
'@graphprotocol/common-ts@2.0.11(encoding@0.1.13)': dependencies: '@graphprotocol/contracts': 5.3.3 '@graphprotocol/pino-sentry-simple': 0.7.1 @@ -11520,9 +11529,9 @@ snapshots: - bufferutil - utf-8-validate - '@graphprotocol/contracts@6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)': + '@graphprotocol/contracts@6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)': dependencies: - '@graphprotocol/sdk': 0.5.0(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + '@graphprotocol/sdk': 0.5.0(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) console-table-printer: 2.12.1 transitivePeerDependencies: - bufferutil @@ -11549,18 +11558,18 @@ snapshots: transitivePeerDependencies: - supports-color - '@graphprotocol/sdk@0.5.0(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)': + '@graphprotocol/sdk@0.5.0(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)': dependencies: - '@arbitrum/sdk': 3.7.0 + '@arbitrum/sdk': 3.7.1 '@ethersproject/experimental': 5.7.0 - '@graphprotocol/common-ts': 2.0.10(encoding@0.1.13) - '@graphprotocol/contracts': 6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) - '@nomicfoundation/hardhat-network-helpers': 1.0.12(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - debug: 4.3.7(supports-color@8.1.1) + '@graphprotocol/common-ts': 2.0.11(encoding@0.1.13) + '@graphprotocol/contracts': 6.2.1(encoding@0.1.13)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + '@nomicfoundation/hardhat-network-helpers': 
1.0.12(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + debug: 4.4.0(supports-color@8.1.1) ethers: 5.7.2 - hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) - hardhat-secure-accounts: 0.0.6(@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)))(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) + hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + hardhat-secure-accounts: 0.0.6(@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)))(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) inquirer: 8.2.6 lodash: 4.17.21 yaml: 1.10.2 @@ -11582,9 +11591,9 @@ snapshots: '@graphql-codegen/core@4.0.2(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 tslib: 2.6.3 @@ -11608,9 +11617,9 @@ snapshots: lodash: 4.17.21 tslib: 2.4.1 - '@graphql-codegen/plugin-helpers@5.0.4(graphql@16.9.0)': + '@graphql-codegen/plugin-helpers@5.1.0(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) change-case-all: 1.0.15 common-tags: 1.8.2 graphql: 16.9.0 @@ -11620,15 +11629,15 @@ snapshots: '@graphql-codegen/schema-ast@4.1.0(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 
5.0.4(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 tslib: 2.6.3 - '@graphql-codegen/typed-document-node@5.0.10(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-codegen/typed-document-node@5.0.12(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) - '@graphql-codegen/visitor-plugin-common': 5.4.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) + '@graphql-codegen/visitor-plugin-common': 5.6.0(encoding@0.1.13)(graphql@16.9.0) auto-bind: 4.0.0 change-case-all: 1.0.15 graphql: 16.9.0 @@ -11649,11 +11658,11 @@ snapshots: - encoding - supports-color - '@graphql-codegen/typescript-operations@4.3.0(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-codegen/typescript-operations@4.4.0(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) - '@graphql-codegen/typescript': 4.1.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-codegen/visitor-plugin-common': 5.4.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) + '@graphql-codegen/typescript': 4.1.2(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/visitor-plugin-common': 5.6.0(encoding@0.1.13)(graphql@16.9.0) auto-bind: 4.0.0 graphql: 16.9.0 tslib: 2.6.3 @@ -11661,12 +11670,12 @@ snapshots: - encoding - supports-color - '@graphql-codegen/typescript-resolvers@4.3.0(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-codegen/typescript-resolvers@4.4.1(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) - '@graphql-codegen/typescript': 4.1.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-codegen/visitor-plugin-common': 5.4.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 
5.1.0(graphql@16.9.0) + '@graphql-codegen/typescript': 4.1.2(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/visitor-plugin-common': 5.6.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) auto-bind: 4.0.0 graphql: 16.9.0 tslib: 2.6.3 @@ -11674,11 +11683,11 @@ snapshots: - encoding - supports-color - '@graphql-codegen/typescript@4.1.0(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-codegen/typescript@4.1.2(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) '@graphql-codegen/schema-ast': 4.1.0(graphql@16.9.0) - '@graphql-codegen/visitor-plugin-common': 5.4.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/visitor-plugin-common': 5.6.0(encoding@0.1.13)(graphql@16.9.0) auto-bind: 4.0.0 graphql: 16.9.0 tslib: 2.6.3 @@ -11703,12 +11712,12 @@ snapshots: - encoding - supports-color - '@graphql-codegen/visitor-plugin-common@5.4.0(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-codegen/visitor-plugin-common@5.6.0(encoding@0.1.13)(graphql@16.9.0)': dependencies: - '@graphql-codegen/plugin-helpers': 5.0.4(graphql@16.9.0) + '@graphql-codegen/plugin-helpers': 5.1.0(graphql@16.9.0) '@graphql-tools/optimize': 2.0.0(graphql@16.9.0) - '@graphql-tools/relay-operation-optimizer': 7.0.2(encoding@0.1.13)(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/relay-operation-optimizer': 7.0.7(encoding@0.1.13)(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) auto-bind: 4.0.0 change-case-all: 1.0.15 dependency-graph: 0.11.0 @@ -11720,6 +11729,10 @@ snapshots: - encoding - supports-color + '@graphql-hive/gateway-abort-signal-any@0.0.1': + dependencies: + tslib: 2.8.1 + '@graphql-inspector/core@6.1.0(graphql@16.9.0)': dependencies: dependency-graph: 1.0.0 @@ -11727,34 +11740,41 @@ snapshots: object-inspect: 1.13.1 tslib: 2.6.2 - 
'@graphql-mesh/cache-localforage@0.98.10(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-inspector/core@6.2.1(graphql@16.9.0)': + dependencies: + dependency-graph: 1.0.0 + graphql: 16.9.0 + object-inspect: 1.13.2 + tslib: 2.6.2 + + '@graphql-mesh/cache-localforage@0.98.10(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) graphql: 16.9.0 localforage: 1.10.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/cli@0.90.12(@types/node@22.7.8)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)': + '@graphql-mesh/cli@0.90.12(@types/node@22.10.2)(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0)': dependencies: '@graphql-codegen/core': 4.0.2(graphql@16.9.0) - '@graphql-codegen/typed-document-node': 5.0.10(encoding@0.1.13)(graphql@16.9.0) - '@graphql-codegen/typescript': 4.1.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/typed-document-node': 5.0.12(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/typescript': 4.1.2(encoding@0.1.13)(graphql@16.9.0) '@graphql-codegen/typescript-generic-sdk': 3.1.0(encoding@0.1.13)(graphql-tag@2.12.6(graphql@16.9.0))(graphql@16.9.0) - 
'@graphql-codegen/typescript-operations': 4.3.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-codegen/typescript-resolvers': 4.3.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-mesh/config': 0.100.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/http': 0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/runtime': 0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 
0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-codegen/typescript-operations': 4.4.0(encoding@0.1.13)(graphql@16.9.0) + '@graphql-codegen/typescript-resolvers': 4.4.1(encoding@0.1.13)(graphql@16.9.0) + '@graphql-mesh/config': 0.100.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/http': 0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/runtime': 0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 
0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) ajv: 8.17.1 change-case: 4.1.2 - cosmiconfig: 9.0.0(typescript@5.6.3) - dotenv: 16.4.5 + cosmiconfig: 9.0.0(typescript@5.7.2) + dotenv: 16.4.7 graphql: 16.9.0 graphql-import-node: 0.0.5(graphql@16.9.0) graphql-ws: 5.16.0(graphql@16.9.0) @@ -11764,15 +11784,15 @@ snapshots: open: 7.4.2 pascal-case: 3.1.2 rimraf: 5.0.10 - ts-node: 10.9.2(@types/node@22.7.8)(typescript@5.6.3) + ts-node: 10.9.2(@types/node@22.10.2)(typescript@5.7.2) tsconfig-paths: 4.2.0 - tslib: 2.8.0 - typescript: 5.6.3 + tslib: 2.8.1 + typescript: 5.7.2 ws: 8.18.0 yargs: 17.7.2 optionalDependencies: node-libcurl: 4.0.0(encoding@0.1.13) - uWebSockets.js: https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/442087c0a01bf146acb7386910739ec81df06700 + uWebSockets.js: https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/6609a88ffa9a16ac5158046761356ce03250a0df transitivePeerDependencies: - '@swc/core' - '@swc/wasm' @@ -11784,488 +11804,510 @@ snapshots: - supports-color - utf-8-validate - '@graphql-mesh/config@0.100.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + 
'@graphql-mesh/config@0.100.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: '@envelop/core': 5.0.2 - '@graphql-mesh/cache-localforage': 0.98.10(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/merger-bare': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/merger-stitching': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/runtime': 0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 
0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/code-file-loader': 8.1.4(graphql@16.9.0) - '@graphql-tools/graphql-file-loader': 8.0.2(graphql@16.9.0) - '@graphql-tools/load': 8.0.3(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-yoga/plugin-persisted-operations': 3.7.0(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0) - '@whatwg-node/fetch': 0.9.22 + '@graphql-mesh/cache-localforage': 0.98.10(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/merger-bare': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/merger-stitching': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/runtime': 0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/code-file-loader': 
8.1.9(graphql@16.9.0) + '@graphql-tools/graphql-file-loader': 8.0.7(graphql@16.9.0) + '@graphql-tools/load': 8.0.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-yoga/plugin-persisted-operations': 3.10.5(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0) + '@whatwg-node/fetch': 0.9.23 camel-case: 4.1.2 graphql: 16.9.0 param-case: 3.0.4 pascal-case: 3.1.2 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - graphql-yoga - supports-color - '@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)': + '@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 path-browserify: 1.0.1 - '@graphql-mesh/fusion-runtime@0.8.14(@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@graphql-mesh/fusion-runtime@0.8.14(@graphql-mesh/store@0.98.10)(@types/node@22.10.2)(graphql@16.9.0)': dependencies: '@envelop/core': 5.0.2 - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/runtime': 0.103.9(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/transport-common': 0.7.9(@graphql-mesh/types@0.102.8)(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 
0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/federation': 2.2.17(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@graphql-tools/stitch': 9.2.15(graphql@16.9.0) - '@graphql-tools/stitching-directives': 3.1.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/runtime': 0.103.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/utils@0.102.13(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/transport-common': 0.7.23(graphql@16.9.0) + '@graphql-mesh/types': 0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.102.13(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/federation': 
2.2.40(@types/node@22.10.2)(graphql@16.9.0) + '@graphql-tools/stitch': 9.4.10(graphql@16.9.0) + '@graphql-tools/stitching-directives': 3.1.23(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) '@whatwg-node/disposablestack': 0.0.5 change-case: 4.1.2 graphql: 16.9.0 - graphql-yoga: 5.7.0(graphql@16.9.0) - tslib: 2.8.0 + graphql-yoga: 5.10.5(graphql@16.9.0) + tslib: 2.8.1 transitivePeerDependencies: - '@graphql-mesh/store' - '@types/node' - - '@types/react' - - graphql-ws - - react - - react-dom - - subscriptions-transport-ws - - '@graphql-mesh/graphql@0.98.11(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(@types/node@22.7.8)(@types/react@18.3.12)(encoding@0.1.13)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(tslib@2.8.0)': - dependencies: - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/string-interpolation': 0.5.6(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/federation': 
2.2.17(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@graphql-tools/url-loader': 8.0.7(@types/node@22.7.8)(encoding@0.1.13)(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + + '@graphql-mesh/graphql@0.98.11(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(@types/node@22.10.2)(encoding@0.1.13)(graphql@16.9.0)(tslib@2.8.1)': + dependencies: + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/string-interpolation': 0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/federation': 2.2.40(@types/node@22.10.2)(graphql@16.9.0) + '@graphql-tools/url-loader': 8.0.19(@types/node@22.10.2)(encoding@0.1.13)(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 lodash.get: 4.4.2 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - '@types/node' - - '@types/react' - bufferutil - encoding - - graphql-ws - - react - - react-dom - - subscriptions-transport-ws - utf-8-validate - 
'@graphql-mesh/http@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/http@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/runtime': 0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@whatwg-node/server': 0.9.50 + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/runtime': 
0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@whatwg-node/server': 0.9.63 graphql: 16.9.0 - graphql-yoga: 5.7.0(graphql@16.9.0) - tslib: 2.8.0 + graphql-yoga: 5.10.5(graphql@16.9.0) + tslib: 2.8.1 - '@graphql-mesh/merger-bare@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/merger-bare@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/merger-stitching': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) + '@graphql-mesh/merger-stitching': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 
0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) '@graphql-tools/schema': 10.0.4(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - '@graphql-mesh/store' - '@graphql-mesh/merger-stitching@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/merger-stitching@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/stitch': 9.2.15(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-mesh/store': 
0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/stitch': 9.4.10(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/runtime@0.103.9(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/runtime@0.103.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-mesh/utils@0.102.13(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: '@envelop/core': 5.0.2 '@envelop/extended-validation': 4.1.0(@envelop/core@5.0.2)(graphql@16.9.0) - '@envelop/graphql-jit': 8.0.3(@envelop/core@5.0.2)(graphql@16.9.0) - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/string-interpolation': 0.5.6(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 
0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/batch-delegate': 9.0.8(graphql@16.9.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) - '@whatwg-node/fetch': 0.9.22 + '@envelop/graphql-jit': 8.0.4(@envelop/core@5.0.2)(graphql@16.9.0) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/string-interpolation': 0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.102.13(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) + '@whatwg-node/fetch': 0.9.23 graphql: 16.9.0 graphql-jit: 0.8.7(graphql@16.9.0) - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + 
'@graphql-mesh/runtime@0.99.12(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: '@envelop/core': 5.0.2 '@envelop/extended-validation': 4.1.0(@envelop/core@5.0.2)(graphql@16.9.0) - '@envelop/graphql-jit': 8.0.3(@envelop/core@5.0.2)(graphql@16.9.0) - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/string-interpolation': 0.5.6(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/batch-delegate': 9.0.8(graphql@16.9.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) - '@whatwg-node/fetch': 0.9.22 + '@envelop/graphql-jit': 8.0.4(@envelop/core@5.0.2)(graphql@16.9.0) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/string-interpolation': 0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) + 
'@whatwg-node/fetch': 0.9.23 graphql: 16.9.0 graphql-jit: 0.8.2(graphql@16.9.0) - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/store@0.103.8(graphql@16.9.0)': + dependencies: + '@graphql-inspector/core': 6.2.1(graphql@16.9.0) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/types': 0.103.8(graphql@16.9.0) + '@graphql-mesh/utils': 0.103.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + graphql: 16.9.0 + tslib: 2.8.1 + + '@graphql-mesh/store@0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: '@graphql-inspector/core': 6.1.0(graphql@16.9.0) - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - 
'@graphql-mesh/string-interpolation@0.5.6(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/string-interpolation@0.5.7(graphql@16.9.0)': dependencies: dayjs: 1.11.13 graphql: 16.9.0 json-pointer: 0.6.2 lodash.get: 4.4.2 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/transform-type-merging@0.102.8(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/transform-type-merging@0.102.13(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/utils': 0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/stitching-directives': 3.1.7(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/stitching-directives': 3.1.23(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/transport-common@0.7.9(@graphql-mesh/types@0.102.8)(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/transport-common@0.7.23(graphql@16.9.0)': dependencies: '@envelop/core': 5.0.2 - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-hive/gateway-abort-signal-any': 0.0.1 + 
'@graphql-mesh/types': 0.103.8(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/types@0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/batch-delegate': 9.0.8(graphql@16.9.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/types@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/types@0.103.8(graphql@16.9.0)': dependencies: - '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-mesh/utils@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/batch-delegate': 9.0.8(graphql@16.9.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) 
+ '@graphql-mesh/store': 0.103.8(graphql@16.9.0) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/utils@0.102.8(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.102.8)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/types@0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/string-interpolation': 0.5.6(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.102.8(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-mesh/store': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-mesh/utils@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) + graphql: 16.9.0 + tslib: 2.8.1 + + '@graphql-mesh/utils@0.102.13(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1))(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': + dependencies: + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/string-interpolation': 
0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.102.13(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@whatwg-node/disposablestack': 0.0.5 - '@whatwg-node/fetch': 0.9.22 + '@whatwg-node/fetch': 0.10.1 dset: 3.1.4 graphql: 16.9.0 js-yaml: 4.1.0 lodash.get: 4.4.2 lodash.topath: 4.5.2 tiny-lru: 11.2.11 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-mesh/utils@0.98.10(@graphql-mesh/cross-helpers@0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0)': + '@graphql-mesh/utils@0.103.8(graphql@16.9.0)': dependencies: - '@graphql-mesh/cross-helpers': 0.4.7(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0) - '@graphql-mesh/string-interpolation': 0.5.6(graphql@16.9.0)(tslib@2.8.0) - '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@whatwg-node/fetch': 0.9.22 - disposablestack: 1.1.6 + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/string-interpolation': 0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.103.8(graphql@16.9.0) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) + '@whatwg-node/disposablestack': 0.0.5 + '@whatwg-node/fetch': 0.10.1 dset: 3.1.4 graphql: 16.9.0 js-yaml: 4.1.0 lodash.get: 4.4.2 lodash.topath: 4.5.2 tiny-lru: 11.2.11 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/batch-delegate@9.0.8(graphql@16.9.0)': + 
'@graphql-mesh/utils@0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.9.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1)': dependencies: - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - dataloader: 2.2.2 + '@graphql-mesh/cross-helpers': 0.4.9(graphql@16.9.0) + '@graphql-mesh/string-interpolation': 0.5.7(graphql@16.9.0) + '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.3(graphql@16.9.0))(graphql@16.9.0)(tslib@2.8.1) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@whatwg-node/fetch': 0.9.23 + disposablestack: 1.1.7 + dset: 3.1.4 graphql: 16.9.0 - tslib: 2.8.0 - value-or-promise: 1.0.12 + js-yaml: 4.1.0 + lodash.get: 4.4.2 + lodash.topath: 4.5.2 + tiny-lru: 11.2.11 + tslib: 2.8.1 - '@graphql-tools/batch-execute@9.0.5(graphql@16.9.0)': + '@graphql-tools/batch-delegate@9.0.24(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - dataloader: 2.2.2 + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + dataloader: 2.2.3 graphql: 16.9.0 - tslib: 2.8.0 - value-or-promise: 1.0.12 + tslib: 2.8.1 - '@graphql-tools/code-file-loader@8.1.4(graphql@16.9.0)': + '@graphql-tools/batch-execute@9.0.10(graphql@16.9.0)': dependencies: - '@graphql-tools/graphql-tag-pluck': 8.3.3(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + dataloader: 2.2.3 + graphql: 16.9.0 + tslib: 2.8.1 + + '@graphql-tools/code-file-loader@8.1.9(graphql@16.9.0)': + dependencies: + '@graphql-tools/graphql-tag-pluck': 8.3.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 unixify: 1.0.0 transitivePeerDependencies: - supports-color - '@graphql-tools/delegate@10.0.26(graphql@16.9.0)': + 
'@graphql-tools/delegate@10.2.8(graphql@16.9.0)': dependencies: - '@graphql-tools/batch-execute': 9.0.5(graphql@16.9.0) - '@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/batch-execute': 9.0.10(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@repeaterjs/repeater': 3.0.6 - dataloader: 2.2.2 + dataloader: 2.2.3 + dset: 3.1.4 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/executor-graphql-ws@1.3.1(graphql@16.9.0)': + '@graphql-tools/executor-graphql-ws@1.3.5(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@types/ws': 8.5.12 + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@whatwg-node/disposablestack': 0.0.5 graphql: 16.9.0 graphql-ws: 5.16.0(graphql@16.9.0) isomorphic-ws: 5.0.0(ws@8.18.0) - tslib: 2.8.0 + tslib: 2.8.1 ws: 8.18.0 transitivePeerDependencies: - bufferutil - utf-8-validate - '@graphql-tools/executor-http@1.1.7(@types/node@22.7.8)(graphql@16.9.0)': + '@graphql-tools/executor-http@1.2.1(@types/node@22.10.2)(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-hive/gateway-abort-signal-any': 0.0.1 + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@repeaterjs/repeater': 3.0.6 - '@whatwg-node/fetch': 0.9.22 + '@whatwg-node/disposablestack': 0.0.5 + '@whatwg-node/fetch': 0.10.1 extract-files: 11.0.0 graphql: 16.9.0 - meros: 1.3.0(@types/node@22.7.8) - tslib: 2.8.0 + meros: 1.3.0(@types/node@22.10.2) + tslib: 2.8.1 value-or-promise: 1.0.12 transitivePeerDependencies: - '@types/node' - '@graphql-tools/executor-legacy-ws@1.1.1(graphql@16.9.0)': + '@graphql-tools/executor-legacy-ws@1.1.6(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@types/ws': 8.5.12 + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) 
+ '@types/ws': 8.5.13 graphql: 16.9.0 isomorphic-ws: 5.0.0(ws@8.18.0) - tslib: 2.8.0 + tslib: 2.8.1 ws: 8.18.0 transitivePeerDependencies: - bufferutil - utf-8-validate - '@graphql-tools/executor@1.3.2(graphql@16.9.0)': + '@graphql-tools/executor@1.3.8(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) '@repeaterjs/repeater': 3.0.6 + '@whatwg-node/disposablestack': 0.0.5 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 value-or-promise: 1.0.12 - '@graphql-tools/federation@2.2.17(@types/node@22.7.8)(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@graphql-tools/federation@2.2.40(@types/node@22.10.2)(graphql@16.9.0)': dependencies: - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/executor-http': 1.1.7(@types/node@22.7.8)(graphql@16.9.0) - '@graphql-tools/merge': 9.0.8(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/stitch': 9.2.15(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) - '@whatwg-node/fetch': 0.9.22 + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/executor-http': 1.2.1(@types/node@22.10.2)(graphql@16.9.0) + '@graphql-tools/merge': 9.0.13(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/stitch': 9.4.10(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) + '@whatwg-node/fetch': 0.10.1 graphql: 16.9.0 - tslib: 2.8.0 - value-or-promise: 1.0.12 - optionalDependencies: - '@apollo/client': 3.11.8(@types/react@18.3.12)(graphql-ws@5.16.0(graphql@16.9.0))(graphql@16.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + tslib: 2.8.1 transitivePeerDependencies: - '@types/node' - - '@types/react' - - graphql-ws - - react - - 
react-dom - - subscriptions-transport-ws - '@graphql-tools/graphql-file-loader@8.0.2(graphql@16.9.0)': + '@graphql-tools/graphql-file-loader@8.0.7(graphql@16.9.0)': dependencies: - '@graphql-tools/import': 7.0.2(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/import': 7.0.7(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 unixify: 1.0.0 - '@graphql-tools/graphql-tag-pluck@8.3.3(graphql@16.9.0)': + '@graphql-tools/graphql-tag-pluck@8.3.8(graphql@16.9.0)': dependencies: - '@babel/core': 7.25.9 - '@babel/parser': 7.25.9 - '@babel/plugin-syntax-import-assertions': 7.25.9(@babel/core@7.25.9) - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@babel/core': 7.26.0 + '@babel/parser': 7.26.3 + '@babel/plugin-syntax-import-assertions': 7.26.0(@babel/core@7.26.0) + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.3 + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - supports-color - '@graphql-tools/import@7.0.2(graphql@16.9.0)': + '@graphql-tools/import@7.0.7(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 resolve-from: 5.0.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/load@8.0.3(graphql@16.9.0)': + '@graphql-tools/load@8.0.8(graphql@16.9.0)': dependencies: - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 p-limit: 3.1.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/merge@9.0.8(graphql@16.9.0)': + '@graphql-tools/merge@9.0.13(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 
2.8.0 + tslib: 2.8.1 '@graphql-tools/optimize@1.4.0(graphql@16.9.0)': dependencies: graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 '@graphql-tools/optimize@2.0.0(graphql@16.9.0)': dependencies: graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 '@graphql-tools/relay-operation-optimizer@6.5.18(encoding@0.1.13)(graphql@16.9.0)': dependencies: '@ardatan/relay-compiler': 12.0.0(encoding@0.1.13)(graphql@16.9.0) '@graphql-tools/utils': 9.2.1(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - encoding - supports-color - '@graphql-tools/relay-operation-optimizer@7.0.2(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-tools/relay-operation-optimizer@7.0.7(encoding@0.1.13)(graphql@16.9.0)': dependencies: '@ardatan/relay-compiler': 12.0.0(encoding@0.1.13)(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 transitivePeerDependencies: - encoding - supports-color - '@graphql-tools/schema@10.0.4(graphql@16.9.0)': + '@graphql-tools/schema@10.0.12(graphql@16.9.0)': dependencies: - '@graphql-tools/merge': 9.0.8(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/merge': 9.0.13(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 value-or-promise: 1.0.12 - '@graphql-tools/schema@10.0.7(graphql@16.9.0)': + '@graphql-tools/schema@10.0.4(graphql@16.9.0)': dependencies: - '@graphql-tools/merge': 9.0.8(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/merge': 9.0.13(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 value-or-promise: 1.0.12 - '@graphql-tools/stitch@9.2.15(graphql@16.9.0)': + '@graphql-tools/stitch@9.4.10(graphql@16.9.0)': dependencies: - '@graphql-tools/batch-delegate': 9.0.8(graphql@16.9.0) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - 
'@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/merge': 9.0.8(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) + '@graphql-tools/batch-delegate': 9.0.24(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/merge': 9.0.13(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 10.0.26(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 - value-or-promise: 1.0.12 + tslib: 2.8.1 - '@graphql-tools/stitching-directives@3.1.7(graphql@16.9.0)': + '@graphql-tools/stitching-directives@3.1.23(graphql@16.9.0)': dependencies: - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/url-loader@8.0.7(@types/node@22.7.8)(encoding@0.1.13)(graphql@16.9.0)': + '@graphql-tools/url-loader@8.0.19(@types/node@22.10.2)(encoding@0.1.13)(graphql@16.9.0)': dependencies: '@ardatan/sync-fetch': 0.0.1(encoding@0.1.13) - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/executor-graphql-ws': 1.3.1(graphql@16.9.0) - '@graphql-tools/executor-http': 1.1.7(@types/node@22.7.8)(graphql@16.9.0) - '@graphql-tools/executor-legacy-ws': 1.1.1(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) - '@graphql-tools/wrap': 10.0.10(graphql@16.9.0) - '@types/ws': 8.5.12 - '@whatwg-node/fetch': 0.9.22 + '@graphql-tools/executor-graphql-ws': 1.3.5(graphql@16.9.0) + '@graphql-tools/executor-http': 1.2.1(@types/node@22.10.2)(graphql@16.9.0) + '@graphql-tools/executor-legacy-ws': 1.1.6(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) + '@graphql-tools/wrap': 
10.0.26(graphql@16.9.0) + '@types/ws': 8.5.13 + '@whatwg-node/fetch': 0.10.1 graphql: 16.9.0 isomorphic-ws: 5.0.0(ws@8.18.0) - tslib: 2.8.0 + tslib: 2.8.1 value-or-promise: 1.0.12 ws: 8.18.0 transitivePeerDependencies: @@ -12274,33 +12316,32 @@ snapshots: - encoding - utf-8-validate - '@graphql-tools/utils@10.5.5(graphql@16.9.0)': + '@graphql-tools/utils@10.6.3(graphql@16.9.0)': dependencies: '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) cross-inspect: 1.0.1 dset: 3.1.4 graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 '@graphql-tools/utils@8.13.1(graphql@16.9.0)': dependencies: graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 '@graphql-tools/utils@9.2.1(graphql@16.9.0)': dependencies: '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-tools/wrap@10.0.10(graphql@16.9.0)': + '@graphql-tools/wrap@10.0.26(graphql@16.9.0)': dependencies: - '@graphql-tools/delegate': 10.0.26(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/delegate': 10.2.8(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.8.0 - value-or-promise: 1.0.12 + tslib: 2.8.1 '@graphql-typed-document-node/core@3.2.0(graphql@16.9.0)': dependencies: @@ -12308,50 +12349,49 @@ snapshots: '@graphql-yoga/logger@2.0.0': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-yoga/plugin-persisted-operations@3.7.0(@graphql-tools/utils@10.5.5(graphql@16.9.0))(graphql-yoga@5.7.0(graphql@16.9.0))(graphql@16.9.0)': + '@graphql-yoga/plugin-persisted-operations@3.10.5(graphql-yoga@5.10.5(graphql@16.9.0))(graphql@16.9.0)': dependencies: - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) graphql: 16.9.0 - graphql-yoga: 5.7.0(graphql@16.9.0) + graphql-yoga: 5.10.5(graphql@16.9.0) - '@graphql-yoga/subscription@5.0.1': + '@graphql-yoga/subscription@5.0.2': dependencies: - 
'@graphql-yoga/typed-event-target': 3.0.0 + '@graphql-yoga/typed-event-target': 3.0.1 '@repeaterjs/repeater': 3.0.6 '@whatwg-node/events': 0.1.2 - tslib: 2.8.0 + tslib: 2.8.1 - '@graphql-yoga/typed-event-target@3.0.0': + '@graphql-yoga/typed-event-target@3.0.1': dependencies: '@repeaterjs/repeater': 3.0.6 - tslib: 2.8.0 + tslib: 2.8.1 '@hasparus/eslint-plugin@1.0.0': dependencies: - typescript: 5.6.3 + typescript: 5.7.2 '@headlessui/react@1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/react-virtual': 3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@tanstack/react-virtual': 3.11.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) client-only: 0.0.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@headlessui/react@2.1.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@headlessui/react@2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@floating-ui/react': 0.26.25(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@tanstack/react-virtual': 3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@floating-ui/react': 0.26.28(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@tanstack/react-virtual': 3.11.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) '@humanwhocodes/config-array@0.13.0': dependencies: '@humanwhocodes/object-schema': 2.0.3 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -12360,22 +12400,22 @@ snapshots: '@humanwhocodes/object-schema@2.0.3': {} - '@internationalized/date@3.5.6': + '@internationalized/date@3.6.0': dependencies: - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 - '@internationalized/message@3.1.5': + '@internationalized/message@3.1.6': dependencies: - 
'@swc/helpers': 0.5.13 - intl-messageformat: 10.7.1 + '@swc/helpers': 0.5.15 + intl-messageformat: 10.7.10 - '@internationalized/number@3.5.4': + '@internationalized/number@3.6.0': dependencies: - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 - '@internationalized/string@3.2.4': + '@internationalized/string@3.2.5': dependencies: - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 '@isaacs/cliui@8.0.2': dependencies: @@ -12390,7 +12430,7 @@ snapshots: dependencies: '@sinclair/typebox': 0.27.8 - '@jridgewell/gen-mapping@0.3.5': + '@jridgewell/gen-mapping@0.3.8': dependencies: '@jridgewell/set-array': 1.2.1 '@jridgewell/sourcemap-codec': 1.5.0 @@ -12455,7 +12495,7 @@ snapshots: '@mdx-js/react@2.3.0(react@18.3.1)': dependencies: '@types/mdx': 2.0.13 - '@types/react': 18.3.12 + '@types/react': 18.3.16 react: 18.3.1 '@metamask/eth-sig-util@4.0.1': @@ -12527,37 +12567,37 @@ snapshots: '@next/env@13.5.7': {} - '@next/env@14.2.18': {} + '@next/env@14.2.20': {} '@next/eslint-plugin-next@13.4.9': dependencies: glob: 7.1.7 - '@next/swc-darwin-arm64@14.2.18': + '@next/swc-darwin-arm64@14.2.20': optional: true - '@next/swc-darwin-x64@14.2.18': + '@next/swc-darwin-x64@14.2.20': optional: true - '@next/swc-linux-arm64-gnu@14.2.18': + '@next/swc-linux-arm64-gnu@14.2.20': optional: true - '@next/swc-linux-arm64-musl@14.2.18': + '@next/swc-linux-arm64-musl@14.2.20': optional: true - '@next/swc-linux-x64-gnu@14.2.18': + '@next/swc-linux-x64-gnu@14.2.20': optional: true - '@next/swc-linux-x64-musl@14.2.18': + '@next/swc-linux-x64-musl@14.2.20': optional: true - '@next/swc-win32-arm64-msvc@14.2.18': + '@next/swc-win32-arm64-msvc@14.2.20': optional: true - '@next/swc-win32-ia32-msvc@14.2.18': + '@next/swc-win32-ia32-msvc@14.2.20': optional: true - '@next/swc-win32-x64-msvc@14.2.18': + '@next/swc-win32-x64-msvc@14.2.20': optional: true '@noble/hashes@1.2.0': {} @@ -12601,7 +12641,7 @@ snapshots: '@nomicfoundation/ethereumjs-tx': 5.0.1 '@nomicfoundation/ethereumjs-util': 9.0.1 
abstract-level: 1.0.4 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) ethereum-cryptography: 0.1.3 level: 8.0.1 lru-cache: 5.1.1 @@ -12634,7 +12674,7 @@ snapshots: '@nomicfoundation/ethereumjs-common': 4.0.1 '@nomicfoundation/ethereumjs-tx': 5.0.1 '@nomicfoundation/ethereumjs-util': 9.0.1 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) ethereum-cryptography: 0.1.3 mcl-wasm: 0.7.9 rustbn.js: 0.2.0 @@ -12649,7 +12689,7 @@ snapshots: dependencies: '@nomicfoundation/ethereumjs-common': 4.0.1 '@nomicfoundation/ethereumjs-rlp': 5.0.1 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) ethereum-cryptography: 0.1.3 ethers: 5.7.2 js-sdsl: 4.4.2 @@ -12695,7 +12735,7 @@ snapshots: '@nomicfoundation/ethereumjs-trie': 6.0.1 '@nomicfoundation/ethereumjs-tx': 5.0.1 '@nomicfoundation/ethereumjs-util': 9.0.1 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) ethereum-cryptography: 0.1.3 mcl-wasm: 0.7.9 rustbn.js: 0.2.0 @@ -12704,10 +12744,10 @@ snapshots: - supports-color - utf-8-validate - '@nomicfoundation/hardhat-network-helpers@1.0.12(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))': + '@nomicfoundation/hardhat-network-helpers@1.0.12(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: ethereumjs-util: 7.1.5 - hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@nomicfoundation/solidity-analyzer-darwin-arm64@0.1.2': optional: true @@ -12740,18 +12780,18 @@ snapshots: '@nomicfoundation/solidity-analyzer-linux-x64-musl': 0.1.2 '@nomicfoundation/solidity-analyzer-win32-x64-msvc': 0.1.2 - '@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))': + 
'@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: ethers: 5.7.2 - hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@npmcli/agent@2.2.2': dependencies: - agent-base: 7.1.1 + agent-base: 7.1.3 http-proxy-agent: 7.0.2 - https-proxy-agent: 7.0.5 + https-proxy-agent: 7.0.6 lru-cache: 10.4.3 - socks-proxy-agent: 8.0.4 + socks-proxy-agent: 8.0.5 transitivePeerDependencies: - supports-color optional: true @@ -12759,7 +12799,7 @@ snapshots: '@npmcli/config@6.4.1': dependencies: '@npmcli/map-workspaces': 3.0.6 - ci-info: 4.0.0 + ci-info: 4.1.0 ini: 4.1.3 nopt: 7.2.1 proc-log: 3.0.0 @@ -12790,6 +12830,8 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) + '@pinax/graph-networks-registry@0.6.5': {} + '@pkgjs/parseargs@0.11.0': optional: true @@ -12797,1379 +12839,1409 @@ snapshots: '@radix-ui/number@1.1.0': {} - '@radix-ui/primitive@1.1.0': {} + '@radix-ui/primitive@1.1.1': {} - '@radix-ui/react-accordion@1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-accordion@1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collapsible': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - 
'@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collapsible': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-alert-dialog@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-alert-dialog@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dialog': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 
2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-arrow@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-arrow@1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-collapsible@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-id': 
1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-collapsible@1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-collection@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-collection@1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-compose-refs': 
1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-compose-refs@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-compose-refs@1.1.1(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-context@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-context@1.1.1(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 - - '@radix-ui/react-context@1.1.1(@types/react@18.3.12)(react@18.3.1)': - dependencies: - react: 18.3.1 - optionalDependencies: - '@types/react': 18.3.12 - - '@radix-ui/react-dialog@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-focus-guards': 
1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + + '@radix-ui/react-dialog@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 
2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.16)(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-direction@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-direction@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-dismissable-layer@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-dismissable-layer@1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-escape-keydown': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-escape-keydown': 
1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-dropdown-menu@2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-menu': 2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-dropdown-menu@2.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-menu': 2.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - 
'@radix-ui/react-focus-guards@1.1.1(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-focus-guards@1.1.1(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-focus-scope@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-focus-scope@1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-id@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-id@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-label@2.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + 
'@radix-ui/react-label@2.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-menu@2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 
2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-roving-focus': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-menu@2.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-popper': 1.2.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 
2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-roving-focus': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.16)(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-navigation-menu@1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-previous': 
1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-visually-hidden': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-navigation-menu@1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - 
'@radix-ui/react-popover@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-popover@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + 
'@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-popper': 1.2.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.16)(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-popper@1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-popper@1.2.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@floating-ui/react-dom': 2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-arrow': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 
1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-rect': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-size': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-arrow': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-rect': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-size': 1.1.0(@types/react@18.3.16)(react@18.3.1) '@radix-ui/rect': 1.1.0 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-portal@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-portal@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 
2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-presence@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-presence@1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-primitive@2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-primitive@2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-roving-focus@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collection': 
1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-roving-focus@1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - 
'@radix-ui/react-slider@1.2.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-slider@1.2.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/number': 1.1.0 - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-size': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-size': 
1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-slot@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-slot@1.1.1(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-switch@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-switch@1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-size': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-size': 
1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-toast@1.2.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-visually-hidden': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-toast@1.2.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 
1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 - - '@radix-ui/react-tooltip@1.1.3(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@radix-ui/primitive': 1.1.0 - '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-portal': 
1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) - '@radix-ui/react-visually-hidden': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) + + '@radix-ui/react-tooltip@1.1.5(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-popper': 1.2.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.1(@types/react@18.3.16)(react@18.3.1) + 
'@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.16)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) - '@radix-ui/react-use-callback-ref@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-callback-ref@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-controllable-state@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-controllable-state@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-escape-keydown@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-escape-keydown@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-layout-effect@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-layout-effect@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-previous@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-previous@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: 
react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-rect@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-rect@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: '@radix-ui/rect': 1.1.0 react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-use-size@1.1.0(@types/react@18.3.12)(react@18.3.1)': + '@radix-ui/react-use-size@1.1.0(@types/react@18.3.16)(react@18.3.1)': dependencies: - '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.16)(react@18.3.1) react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@radix-ui/react-visually-hidden@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-visually-hidden@1.1.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.16))(@types/react@18.3.16)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 - '@types/react-dom': 18.3.1 + '@types/react': 18.3.16 + '@types/react-dom': 18.3.5(@types/react@18.3.16) '@radix-ui/rect@1.1.0': {} - '@react-aria/breadcrumbs@3.5.18(react@18.3.1)': + '@react-aria/breadcrumbs@3.5.19(react@18.3.1)': dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/link': 3.7.6(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/breadcrumbs': 3.7.8(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/i18n': 3.12.4(react@18.3.1) + 
'@react-aria/link': 3.7.7(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/breadcrumbs': 3.7.9(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/button@3.10.1(react@18.3.1)': + '@react-aria/button@3.11.0(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/toggle': 3.7.8(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/toolbar': 3.0.0-beta.11(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/toggle': 3.8.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/calendar@3.5.13(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@internationalized/date': 3.5.6 - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/live-announcer': 3.4.0 - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/calendar': 3.5.5(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/calendar': 3.4.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/calendar@3.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@internationalized/date': 3.6.0 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/calendar': 3.6.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/calendar': 3.5.0(react@18.3.1) + '@react-types/shared': 
3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/checkbox@3.14.8(react@18.3.1)': - dependencies: - '@react-aria/form': 3.0.10(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/toggle': 3.10.9(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/checkbox': 3.6.9(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/toggle': 3.7.8(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/checkbox@3.15.0(react@18.3.1)': + dependencies: + '@react-aria/form': 3.0.11(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/toggle': 3.10.10(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/checkbox': 3.6.10(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/toggle': 3.8.0(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 + react: 18.3.1 + + '@react-aria/color@3.0.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/numberfield': 3.11.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/slider': 3.7.14(react@18.3.1) + '@react-aria/spinbutton': 3.6.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/textfield': 3.15.0(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-aria/visually-hidden': 3.8.18(react@18.3.1) + '@react-stately/color': 3.8.1(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-types/color': 3.0.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 + react-dom: 
18.3.1(react@18.3.1) - '@react-aria/color@3.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/numberfield': 3.11.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/slider': 3.7.13(react@18.3.1) - '@react-aria/spinbutton': 3.6.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/textfield': 3.14.10(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-aria/visually-hidden': 3.8.17(react@18.3.1) - '@react-stately/color': 3.8.0(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-types/color': 3.0.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/combobox@3.11.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/listbox': 3.13.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/menu': 3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/overlays': 3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/textfield': 3.15.0(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/combobox': 3.10.1(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/combobox': 3.13.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/combobox@3.10.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/listbox': 3.13.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/live-announcer': 3.4.0 - '@react-aria/menu': 
3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/overlays': 3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/textfield': 3.14.10(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/combobox': 3.10.0(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/combobox': 3.13.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/datepicker@3.12.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@internationalized/date': 3.6.0 + '@internationalized/number': 3.6.0 + '@internationalized/string': 3.2.5 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/form': 3.0.11(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/spinbutton': 3.6.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/datepicker': 3.11.0(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/calendar': 3.5.0(react@18.3.1) + '@react-types/datepicker': 3.9.0(react@18.3.1) + '@react-types/dialog': 3.5.14(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/datepicker@3.11.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@internationalized/date': 3.5.6 - '@internationalized/number': 3.5.4 - '@internationalized/string': 3.2.4 - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/form': 3.0.10(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 
3.7.12(react@18.3.1) - '@react-aria/spinbutton': 3.6.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/datepicker': 3.10.3(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/calendar': 3.4.10(react@18.3.1) - '@react-types/datepicker': 3.8.3(react@18.3.1) - '@react-types/dialog': 3.5.13(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/dialog@3.5.20(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/overlays': 3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/dialog': 3.5.14(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/dialog@3.5.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@react-aria/disclosure@3.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/overlays': 3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/dialog': 3.5.13(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/disclosure': 3.0.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/dnd@3.7.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@internationalized/string': 3.2.4 - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/live-announcer': 3.4.0 - '@react-aria/overlays': 3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 
3.25.3(react@18.3.1) - '@react-stately/dnd': 3.4.3(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/dnd@3.8.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@internationalized/string': 3.2.5 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/overlays': 3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/dnd': 3.5.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/focus@3.18.4(react@18.3.1)': + '@react-aria/focus@3.19.0(react@18.3.1)': dependencies: - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 clsx: 2.1.1 react: 18.3.1 - '@react-aria/form@3.0.10(react@18.3.1)': + '@react-aria/form@3.0.11(react@18.3.1)': dependencies: - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/grid@3.10.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - 
'@react-aria/live-announcer': 3.4.0 - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/grid': 3.9.3(react@18.3.1) - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@react-types/grid': 3.2.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/grid@3.11.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/grid': 3.10.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/grid': 3.2.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/gridlist@3.9.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/grid': 3.10.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-stately/tree': 3.8.5(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/gridlist@3.10.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/focus': 
3.19.0(react@18.3.1) + '@react-aria/grid': 3.11.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-stately/tree': 3.8.6(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/i18n@3.12.3(react@18.3.1)': + '@react-aria/i18n@3.12.4(react@18.3.1)': dependencies: - '@internationalized/date': 3.5.6 - '@internationalized/message': 3.1.5 - '@internationalized/number': 3.5.4 - '@internationalized/string': 3.2.4 - '@react-aria/ssr': 3.9.6(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@internationalized/date': 3.6.0 + '@internationalized/message': 3.1.6 + '@internationalized/number': 3.6.0 + '@internationalized/string': 3.2.5 + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/interactions@3.22.4(react@18.3.1)': + '@react-aria/interactions@3.22.5(react@18.3.1)': dependencies: - '@react-aria/ssr': 3.9.6(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/label@3.7.12(react@18.3.1)': + '@react-aria/label@3.7.13(react@18.3.1)': dependencies: - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/utils': 3.26.0(react@18.3.1) + 
'@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/link@3.7.6(react@18.3.1)': + '@react-aria/link@3.7.7(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/link': 3.5.8(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/link': 3.5.9(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/listbox@3.13.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-types/listbox': 3.5.2(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/listbox@3.13.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-types/listbox': 3.5.3(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/live-announcer@3.4.0': - dependencies: - '@swc/helpers': 0.5.13 - - '@react-aria/menu@3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/focus': 
3.18.4(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/overlays': 3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/menu': 3.8.3(react@18.3.1) - '@react-stately/tree': 3.8.5(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/menu': 3.9.12(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/live-announcer@3.4.1': + dependencies: + '@swc/helpers': 0.5.15 + + '@react-aria/menu@3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/overlays': 3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/menu': 3.9.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-stately/tree': 3.8.6(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/menu': 3.9.13(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/meter@3.4.17(react@18.3.1)': + '@react-aria/meter@3.4.18(react@18.3.1)': dependencies: - '@react-aria/progress': 3.4.17(react@18.3.1) - '@react-types/meter': 3.4.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/progress': 3.4.18(react@18.3.1) + '@react-types/meter': 3.4.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - 
'@react-aria/numberfield@3.11.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/spinbutton': 3.6.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/textfield': 3.14.10(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/numberfield': 3.9.7(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/numberfield': 3.8.6(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/numberfield@3.11.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/spinbutton': 3.6.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/textfield': 3.15.0(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/numberfield': 3.9.8(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/numberfield': 3.8.7(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/overlays@3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/ssr': 3.9.6(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-aria/visually-hidden': 3.8.17(react@18.3.1) - '@react-stately/overlays': 3.6.11(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/overlays': 3.8.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/overlays@3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + 
'@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-aria/visually-hidden': 3.8.18(react@18.3.1) + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/progress@3.4.17(react@18.3.1)': + '@react-aria/progress@3.4.18(react@18.3.1)': dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/progress': 3.5.7(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/progress': 3.5.8(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/radio@3.10.9(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/form': 3.0.10(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/radio': 3.10.8(react@18.3.1) - '@react-types/radio': 3.8.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/radio@3.10.10(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/form': 3.0.11(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + 
'@react-stately/radio': 3.10.9(react@18.3.1) + '@react-types/radio': 3.8.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/searchfield@3.7.10(react@18.3.1)': + '@react-aria/searchfield@3.7.11(react@18.3.1)': dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/textfield': 3.14.10(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/searchfield': 3.5.7(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/searchfield': 3.5.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/textfield': 3.15.0(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/searchfield': 3.5.8(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/searchfield': 3.5.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/select@3.14.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/form': 3.0.10(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/listbox': 3.13.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/menu': 3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-aria/visually-hidden': 3.8.17(react@18.3.1) - '@react-stately/select': 3.6.8(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/select': 3.9.7(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/select@3.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/form': 3.0.11(react@18.3.1) + '@react-aria/i18n': 
3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/listbox': 3.13.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/menu': 3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-aria/visually-hidden': 3.8.18(react@18.3.1) + '@react-stately/select': 3.6.9(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/select': 3.9.8(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/selection@3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@react-aria/selection@3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/separator@3.4.3(react@18.3.1)': + '@react-aria/separator@3.4.4(react@18.3.1)': dependencies: - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/slider@3.7.13(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - 
'@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/slider': 3.5.8(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/slider': 3.7.6(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/slider@3.7.14(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/slider': 3.6.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/slider': 3.7.7(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/spinbutton@3.6.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@react-aria/spinbutton@3.6.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/live-announcer': 3.4.0 - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/ssr@3.9.6(react@18.3.1)': + '@react-aria/ssr@3.9.7(react@18.3.1)': dependencies: - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/switch@3.6.9(react@18.3.1)': + '@react-aria/switch@3.6.10(react@18.3.1)': dependencies: - '@react-aria/toggle': 3.10.9(react@18.3.1) - '@react-stately/toggle': 3.7.8(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/switch': 3.5.6(react@18.3.1) - '@swc/helpers': 0.5.13 + 
'@react-aria/toggle': 3.10.10(react@18.3.1) + '@react-stately/toggle': 3.8.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/switch': 3.5.7(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/table@3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/grid': 3.10.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/live-announcer': 3.4.0 - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-aria/visually-hidden': 3.8.17(react@18.3.1) - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/flags': 3.0.4 - '@react-stately/table': 3.12.3(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@react-types/grid': 3.2.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/table': 3.10.2(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/table@3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/grid': 3.11.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/live-announcer': 3.4.1 + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-aria/visually-hidden': 3.8.18(react@18.3.1) + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/flags': 3.0.5 + '@react-stately/table': 3.13.0(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/grid': 3.2.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/table': 3.10.3(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/tabs@3.9.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@react-aria/tabs@3.9.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - 
'@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/tabs': 3.6.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/tabs': 3.3.10(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/tabs': 3.7.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/tabs': 3.3.11(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@react-aria/tag@3.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': - dependencies: - '@react-aria/gridlist': 3.9.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-types/button': 3.10.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/tag@3.4.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@react-aria/gridlist': 3.10.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-types/button': 3.10.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 react-dom: 
18.3.1(react@18.3.1) - '@react-aria/textfield@3.14.10(react@18.3.1)': - dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/form': 3.0.10(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/textfield': 3.9.7(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/textfield@3.15.0(react@18.3.1)': + dependencies: + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/form': 3.0.11(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/textfield': 3.10.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/toggle@3.10.9(react@18.3.1)': + '@react-aria/toggle@3.10.10(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/toggle': 3.7.8(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/toggle': 3.8.0(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/tooltip@3.7.9(react@18.3.1)': + '@react-aria/toolbar@3.0.0-beta.11(react@18.3.1)': dependencies: - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-stately/tooltip': 3.4.13(react@18.3.1) - '@react-types/shared': 
3.25.0(react@18.3.1) - '@react-types/tooltip': 3.4.12(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-aria/utils@3.25.3(react@18.3.1)': + '@react-aria/tooltip@3.7.10(react@18.3.1)': dependencies: - '@react-aria/ssr': 3.9.6(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-stately/tooltip': 3.5.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/tooltip': 3.4.13(react@18.3.1) + '@swc/helpers': 0.5.15 + react: 18.3.1 + + '@react-aria/utils@3.26.0(react@18.3.1)': + dependencies: + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 clsx: 2.1.1 react: 18.3.1 - '@react-aria/visually-hidden@3.8.17(react@18.3.1)': + '@react-aria/visually-hidden@3.8.18(react@18.3.1)': dependencies: - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/calendar@3.5.5(react@18.3.1)': + '@react-stately/calendar@3.6.0(react@18.3.1)': dependencies: - '@internationalized/date': 3.5.6 - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/calendar': 3.4.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@internationalized/date': 3.6.0 + '@react-stately/utils': 
3.10.5(react@18.3.1) + '@react-types/calendar': 3.5.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/checkbox@3.6.9(react@18.3.1)': + '@react-stately/checkbox@3.6.10(react@18.3.1)': dependencies: - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/collections@3.11.0(react@18.3.1)': + '@react-stately/collections@3.12.0(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/color@3.8.0(react@18.3.1)': - dependencies: - '@internationalized/number': 3.5.4 - '@internationalized/string': 3.2.4 - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/numberfield': 3.9.7(react@18.3.1) - '@react-stately/slider': 3.5.8(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/color': 3.0.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/color@3.8.1(react@18.3.1)': + dependencies: + '@internationalized/number': 3.6.0 + '@internationalized/string': 3.2.5 + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/numberfield': 3.9.8(react@18.3.1) + '@react-stately/slider': 3.6.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/color': 3.0.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - 
'@react-stately/combobox@3.10.0(react@18.3.1)': - dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-stately/overlays': 3.6.11(react@18.3.1) - '@react-stately/select': 3.6.8(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/combobox': 3.13.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/combobox@3.10.1(react@18.3.1)': + dependencies: + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-stately/select': 3.6.9(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/combobox': 3.13.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/datepicker@3.10.3(react@18.3.1)': + '@react-stately/datepicker@3.11.0(react@18.3.1)': dependencies: - '@internationalized/date': 3.5.6 - '@internationalized/string': 3.2.4 - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/overlays': 3.6.11(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/datepicker': 3.8.3(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@internationalized/date': 3.6.0 + '@internationalized/string': 3.2.5 + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/datepicker': 3.9.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/dnd@3.4.3(react@18.3.1)': + '@react-stately/disclosure@3.0.0(react@18.3.1)': dependencies: - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + 
'@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/flags@3.0.4': + '@react-stately/dnd@3.5.0(react@18.3.1)': dependencies: - '@swc/helpers': 0.5.13 + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 + react: 18.3.1 - '@react-stately/form@3.0.6(react@18.3.1)': + '@react-stately/flags@3.0.5': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 + + '@react-stately/form@3.1.0(react@18.3.1)': + dependencies: + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/grid@3.9.3(react@18.3.1)': + '@react-stately/grid@3.10.0(react@18.3.1)': dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-types/grid': 3.2.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-types/grid': 3.2.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/list@3.11.0(react@18.3.1)': + '@react-stately/list@3.11.1(react@18.3.1)': dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/menu@3.8.3(react@18.3.1)': + '@react-stately/menu@3.9.0(react@18.3.1)': dependencies: - '@react-stately/overlays': 3.6.11(react@18.3.1) - 
'@react-types/menu': 3.9.12(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-types/menu': 3.9.13(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/numberfield@3.9.7(react@18.3.1)': + '@react-stately/numberfield@3.9.8(react@18.3.1)': dependencies: - '@internationalized/number': 3.5.4 - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/numberfield': 3.8.6(react@18.3.1) - '@swc/helpers': 0.5.13 + '@internationalized/number': 3.6.0 + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/numberfield': 3.8.7(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/overlays@3.6.11(react@18.3.1)': + '@react-stately/overlays@3.6.12(react@18.3.1)': dependencies: - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/overlays': 3.8.10(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/radio@3.10.8(react@18.3.1)': + '@react-stately/radio@3.10.9(react@18.3.1)': dependencies: - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/radio': 3.8.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/radio': 3.8.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/searchfield@3.5.7(react@18.3.1)': + '@react-stately/searchfield@3.5.8(react@18.3.1)': dependencies: - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/searchfield': 3.5.9(react@18.3.1) - '@swc/helpers': 0.5.13 + 
'@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/searchfield': 3.5.10(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/select@3.6.8(react@18.3.1)': + '@react-stately/select@3.6.9(react@18.3.1)': dependencies: - '@react-stately/form': 3.0.6(react@18.3.1) - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-stately/overlays': 3.6.11(react@18.3.1) - '@react-types/select': 3.9.7(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/form': 3.1.0(react@18.3.1) + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-types/select': 3.9.8(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/selection@3.17.0(react@18.3.1)': + '@react-stately/selection@3.18.0(react@18.3.1)': dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/slider@3.5.8(react@18.3.1)': + '@react-stately/slider@3.6.0(react@18.3.1)': dependencies: - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/slider': 3.7.6(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/slider': 3.7.7(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/table@3.12.3(react@18.3.1)': - dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - '@react-stately/flags': 3.0.4 - '@react-stately/grid': 3.9.3(react@18.3.1) - '@react-stately/selection': 3.17.0(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - 
'@react-types/grid': 3.2.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/table': 3.10.2(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/table@3.13.0(react@18.3.1)': + dependencies: + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/flags': 3.0.5 + '@react-stately/grid': 3.10.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/grid': 3.2.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/table': 3.10.3(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/tabs@3.6.10(react@18.3.1)': + '@react-stately/tabs@3.7.0(react@18.3.1)': dependencies: - '@react-stately/list': 3.11.0(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/tabs': 3.3.10(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/list': 3.11.1(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/tabs': 3.3.11(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/toggle@3.7.8(react@18.3.1)': + '@react-stately/toggle@3.8.0(react@18.3.1)': dependencies: - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/checkbox': 3.8.4(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/checkbox': 3.9.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/tooltip@3.4.13(react@18.3.1)': + '@react-stately/tooltip@3.5.0(react@18.3.1)': dependencies: - '@react-stately/overlays': 3.6.11(react@18.3.1) - '@react-types/tooltip': 3.4.12(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/overlays': 3.6.12(react@18.3.1) + '@react-types/tooltip': 3.4.13(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/tree@3.8.5(react@18.3.1)': + '@react-stately/tree@3.8.6(react@18.3.1)': dependencies: - '@react-stately/collections': 3.11.0(react@18.3.1) - 
'@react-stately/selection': 3.17.0(react@18.3.1) - '@react-stately/utils': 3.10.4(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) - '@swc/helpers': 0.5.13 + '@react-stately/collections': 3.12.0(react@18.3.1) + '@react-stately/selection': 3.18.0(react@18.3.1) + '@react-stately/utils': 3.10.5(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-stately/utils@3.10.4(react@18.3.1)': + '@react-stately/utils@3.10.5(react@18.3.1)': dependencies: - '@swc/helpers': 0.5.13 + '@swc/helpers': 0.5.15 react: 18.3.1 - '@react-types/breadcrumbs@3.7.8(react@18.3.1)': + '@react-types/breadcrumbs@3.7.9(react@18.3.1)': dependencies: - '@react-types/link': 3.5.8(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/link': 3.5.9(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/button@3.10.0(react@18.3.1)': + '@react-types/button@3.10.1(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/calendar@3.4.10(react@18.3.1)': + '@react-types/calendar@3.5.0(react@18.3.1)': dependencies: - '@internationalized/date': 3.5.6 - '@react-types/shared': 3.25.0(react@18.3.1) + '@internationalized/date': 3.6.0 + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/checkbox@3.8.4(react@18.3.1)': + '@react-types/checkbox@3.9.0(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/color@3.0.0(react@18.3.1)': + '@react-types/color@3.0.1(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/slider': 3.7.6(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/slider': 3.7.7(react@18.3.1) react: 18.3.1 - '@react-types/combobox@3.13.0(react@18.3.1)': + '@react-types/combobox@3.13.1(react@18.3.1)': dependencies: 
- '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/datepicker@3.8.3(react@18.3.1)': + '@react-types/datepicker@3.9.0(react@18.3.1)': dependencies: - '@internationalized/date': 3.5.6 - '@react-types/calendar': 3.4.10(react@18.3.1) - '@react-types/overlays': 3.8.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@internationalized/date': 3.6.0 + '@react-types/calendar': 3.5.0(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/dialog@3.5.13(react@18.3.1)': + '@react-types/dialog@3.5.14(react@18.3.1)': dependencies: - '@react-types/overlays': 3.8.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/grid@3.2.9(react@18.3.1)': + '@react-types/grid@3.2.10(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/link@3.5.8(react@18.3.1)': + '@react-types/link@3.5.9(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/listbox@3.5.2(react@18.3.1)': + '@react-types/listbox@3.5.3(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/menu@3.9.12(react@18.3.1)': + '@react-types/menu@3.9.13(react@18.3.1)': dependencies: - '@react-types/overlays': 3.8.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/meter@3.4.4(react@18.3.1)': + '@react-types/meter@3.4.5(react@18.3.1)': dependencies: - '@react-types/progress': 3.5.7(react@18.3.1) + '@react-types/progress': 
3.5.8(react@18.3.1) react: 18.3.1 - '@react-types/numberfield@3.8.6(react@18.3.1)': + '@react-types/numberfield@3.8.7(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/overlays@3.8.10(react@18.3.1)': + '@react-types/overlays@3.8.11(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/progress@3.5.7(react@18.3.1)': + '@react-types/progress@3.5.8(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/radio@3.8.4(react@18.3.1)': + '@react-types/radio@3.8.5(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/searchfield@3.5.9(react@18.3.1)': + '@react-types/searchfield@3.5.10(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) - '@react-types/textfield': 3.9.7(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) + '@react-types/textfield': 3.10.0(react@18.3.1) react: 18.3.1 - '@react-types/select@3.9.7(react@18.3.1)': + '@react-types/select@3.9.8(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/shared@3.25.0(react@18.3.1)': + '@react-types/shared@3.26.0(react@18.3.1)': dependencies: react: 18.3.1 - '@react-types/slider@3.7.6(react@18.3.1)': + '@react-types/slider@3.7.7(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/switch@3.5.6(react@18.3.1)': + '@react-types/switch@3.5.7(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - 
'@react-types/table@3.10.2(react@18.3.1)': + '@react-types/table@3.10.3(react@18.3.1)': dependencies: - '@react-types/grid': 3.2.9(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/grid': 3.2.10(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/tabs@3.3.10(react@18.3.1)': + '@react-types/tabs@3.3.11(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/textfield@3.9.7(react@18.3.1)': + '@react-types/textfield@3.10.0(react@18.3.1)': dependencies: - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 - '@react-types/tooltip@3.4.12(react@18.3.1)': + '@react-types/tooltip@3.4.13(react@18.3.1)': dependencies: - '@react-types/overlays': 3.8.10(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + '@react-types/overlays': 3.8.11(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 '@repeaterjs/repeater@3.0.6': {} '@resvg/resvg-wasm@2.6.2': {} - '@rollup/rollup-android-arm-eabi@4.24.0': + '@rollup/rollup-android-arm-eabi@4.28.1': optional: true - '@rollup/rollup-android-arm64@4.24.0': + '@rollup/rollup-android-arm64@4.28.1': optional: true - '@rollup/rollup-darwin-arm64@4.24.0': + '@rollup/rollup-darwin-arm64@4.28.1': optional: true - '@rollup/rollup-darwin-x64@4.24.0': + '@rollup/rollup-darwin-x64@4.28.1': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.24.0': + '@rollup/rollup-freebsd-arm64@4.28.1': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.24.0': + '@rollup/rollup-freebsd-x64@4.28.1': optional: true - '@rollup/rollup-linux-arm64-gnu@4.24.0': + '@rollup/rollup-linux-arm-gnueabihf@4.28.1': optional: true - '@rollup/rollup-linux-arm64-musl@4.24.0': + '@rollup/rollup-linux-arm-musleabihf@4.28.1': optional: true - '@rollup/rollup-linux-powerpc64le-gnu@4.24.0': + '@rollup/rollup-linux-arm64-gnu@4.28.1': optional: 
true - '@rollup/rollup-linux-riscv64-gnu@4.24.0': + '@rollup/rollup-linux-arm64-musl@4.28.1': optional: true - '@rollup/rollup-linux-s390x-gnu@4.24.0': + '@rollup/rollup-linux-loongarch64-gnu@4.28.1': optional: true - '@rollup/rollup-linux-x64-gnu@4.24.0': + '@rollup/rollup-linux-powerpc64le-gnu@4.28.1': optional: true - '@rollup/rollup-linux-x64-musl@4.24.0': + '@rollup/rollup-linux-riscv64-gnu@4.28.1': optional: true - '@rollup/rollup-win32-arm64-msvc@4.24.0': + '@rollup/rollup-linux-s390x-gnu@4.28.1': optional: true - '@rollup/rollup-win32-ia32-msvc@4.24.0': + '@rollup/rollup-linux-x64-gnu@4.28.1': optional: true - '@rollup/rollup-win32-x64-msvc@4.24.0': + '@rollup/rollup-linux-x64-musl@4.28.1': optional: true - '@rrweb/types@2.0.0-alpha.17': - dependencies: - rrweb-snapshot: 2.0.0-alpha.17 + '@rollup/rollup-win32-arm64-msvc@4.28.1': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.28.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.28.1': + optional: true + + '@rrweb/types@2.0.0-alpha.18': {} '@rtsao/scc@1.1.0': {} @@ -14237,32 +14309,32 @@ snapshots: '@sentry/types': 5.30.0 tslib: 1.14.1 - '@shikijs/core@1.22.0': + '@shikijs/core@1.24.2': dependencies: - '@shikijs/engine-javascript': 1.22.0 - '@shikijs/engine-oniguruma': 1.22.0 - '@shikijs/types': 1.22.0 - '@shikijs/vscode-textmate': 9.3.0 + '@shikijs/engine-javascript': 1.24.2 + '@shikijs/engine-oniguruma': 1.24.2 + '@shikijs/types': 1.24.2 + '@shikijs/vscode-textmate': 9.3.1 '@types/hast': 3.0.4 - hast-util-to-html: 9.0.3 + hast-util-to-html: 9.0.4 - '@shikijs/engine-javascript@1.22.0': + '@shikijs/engine-javascript@1.24.2': dependencies: - '@shikijs/types': 1.22.0 - '@shikijs/vscode-textmate': 9.3.0 - oniguruma-to-js: 0.4.3 + '@shikijs/types': 1.24.2 + '@shikijs/vscode-textmate': 9.3.1 + oniguruma-to-es: 0.7.0 - '@shikijs/engine-oniguruma@1.22.0': + '@shikijs/engine-oniguruma@1.24.2': dependencies: - '@shikijs/types': 1.22.0 - '@shikijs/vscode-textmate': 9.3.0 + '@shikijs/types': 1.24.2 + 
'@shikijs/vscode-textmate': 9.3.1 - '@shikijs/types@1.22.0': + '@shikijs/types@1.24.2': dependencies: - '@shikijs/vscode-textmate': 9.3.0 + '@shikijs/vscode-textmate': 9.3.1 '@types/hast': 3.0.4 - '@shikijs/vscode-textmate@9.3.0': {} + '@shikijs/vscode-textmate@9.3.1': {} '@shuding/opentype.js@1.4.0-beta.0': dependencies: @@ -14271,21 +14343,12 @@ snapshots: '@sinclair/typebox@0.27.8': {} - '@sindresorhus/slugify@2.2.1': - dependencies: - '@sindresorhus/transliterate': 1.6.0 - escape-string-regexp: 5.0.0 - - '@sindresorhus/transliterate@1.6.0': - dependencies: - escape-string-regexp: 5.0.0 - '@storybook/channels@7.6.20': dependencies: '@storybook/client-logger': 7.6.20 '@storybook/core-events': 7.6.20 '@storybook/global': 5.0.0 - qs: 6.13.0 + qs: 6.13.1 telejson: 7.2.0 tiny-invariant: 1.3.3 @@ -14299,11 +14362,11 @@ snapshots: '@storybook/csf-tools@7.6.20': dependencies: - '@babel/generator': 7.25.9 - '@babel/parser': 7.25.9 - '@babel/traverse': 7.25.9 - '@babel/types': 7.25.9 - '@storybook/csf': 0.1.11 + '@babel/generator': 7.26.2 + '@babel/parser': 7.26.0 + '@babel/traverse': 7.26.4 + '@babel/types': 7.26.0 + '@storybook/csf': 0.1.12 '@storybook/types': 7.6.20 fs-extra: 11.2.0 recast: 0.23.9 @@ -14311,7 +14374,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@storybook/csf@0.1.11': + '@storybook/csf@0.1.12': dependencies: type-fest: 2.19.0 @@ -14383,33 +14446,33 @@ snapshots: '@swc/counter@0.1.3': {} - '@swc/helpers@0.5.13': + '@swc/helpers@0.5.15': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 '@swc/helpers@0.5.5': dependencies: '@swc/counter': 0.1.3 - tslib: 2.8.0 + tslib: 2.8.1 - '@tailwindcss/container-queries@0.1.1(tailwindcss@3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)))': + '@tailwindcss/container-queries@0.1.1(tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)))': dependencies: - tailwindcss: 3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)) + tailwindcss: 
3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) - '@tanem/react-nprogress@5.0.52(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tanem/react-nprogress@5.0.53(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 hoist-non-react-statics: 3.3.2 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@tanstack/react-virtual@3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tanstack/react-virtual@3.11.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/virtual-core': 3.10.8 + '@tanstack/virtual-core': 3.10.9 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@tanstack/virtual-core@3.10.8': {} + '@tanstack/virtual-core@3.10.9': {} '@theguild/remark-mermaid@0.0.5(react@18.3.1)': dependencies: @@ -14424,57 +14487,57 @@ snapshots: npm-to-yarn: 2.2.1 unist-util-visit: 5.0.0 - '@theme-ui/color-modes@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1)': + '@theme-ui/color-modes@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1)': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) deepmerge: 4.3.1 react: 18.3.1 - '@theme-ui/components@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@theme-ui/theme-provider@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(react@18.3.1)': + 
'@theme-ui/components@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@theme-ui/theme-provider@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(react@18.3.1)': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) '@styled-system/color': 5.1.2 '@styled-system/should-forward-prop': 5.1.5 '@styled-system/space': 5.1.2 - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) - '@theme-ui/theme-provider': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@types/styled-system': 5.1.22 + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) + '@theme-ui/theme-provider': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@types/styled-system': 5.1.23 react: 18.3.1 - '@theme-ui/core@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1)': + '@theme-ui/core@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1)': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) deepmerge: 4.3.1 react: 18.3.1 - '@theme-ui/css@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))': + '@theme-ui/css@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) 
csstype: 3.1.3 - '@theme-ui/global@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1)': + '@theme-ui/global@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1)': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) react: 18.3.1 - '@theme-ui/match-media@0.17.0(@theme-ui/core@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(@theme-ui/css@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)))(react@18.3.1)': + '@theme-ui/match-media@0.17.1(@theme-ui/core@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(@theme-ui/css@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)))(react@18.3.1)': dependencies: - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) react: 18.3.1 - '@theme-ui/theme-provider@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1)': + '@theme-ui/theme-provider@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1)': dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@theme-ui/color-modes': 
0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@theme-ui/color-modes': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) react: 18.3.1 '@ts-morph/common@0.24.0': @@ -14498,45 +14561,45 @@ snapshots: '@types/babel__core@7.20.5': dependencies: - '@babel/parser': 7.25.9 - '@babel/types': 7.25.9 + '@babel/parser': 7.26.0 + '@babel/types': 7.26.0 '@types/babel__generator': 7.6.8 '@types/babel__template': 7.4.4 '@types/babel__traverse': 7.20.6 '@types/babel__generator@7.6.8': dependencies: - '@babel/types': 7.25.9 + '@babel/types': 7.26.0 '@types/babel__template@7.4.4': dependencies: - '@babel/parser': 7.25.9 - '@babel/types': 7.25.9 + '@babel/parser': 7.26.0 + '@babel/types': 7.26.0 '@types/babel__traverse@7.20.6': dependencies: - '@babel/types': 7.25.9 + '@babel/types': 7.26.0 '@types/bn.js@4.11.6': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/bn.js@5.1.6': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/body-parser@1.19.5': dependencies: '@types/connect': 3.4.38 - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/concat-stream@2.0.3': dependencies: - '@types/node': 18.19.58 + '@types/node': 18.19.68 '@types/connect@3.4.38': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/cookie@0.6.0': {} @@ -14554,17 +14617,17 @@ snapshots: '@types/d3-path@3.1.0': {} - '@types/d3-scale-chromatic@3.0.3': {} + '@types/d3-scale-chromatic@3.1.0': {} '@types/d3-scale@4.0.8': dependencies: - '@types/d3-time': 3.0.3 + 
'@types/d3-time': 3.0.4 '@types/d3-shape@3.1.6': dependencies: '@types/d3-path': 3.1.0 - '@types/d3-time@3.0.3': {} + '@types/d3-time@3.0.4': {} '@types/d3-timer@3.0.2': {} @@ -14580,8 +14643,8 @@ snapshots: '@types/express-serve-static-core@4.19.6': dependencies: - '@types/node': 22.7.8 - '@types/qs': 6.9.16 + '@types/node': 22.10.2 + '@types/qs': 6.9.17 '@types/range-parser': 1.2.7 '@types/send': 0.17.4 @@ -14589,7 +14652,7 @@ snapshots: dependencies: '@types/body-parser': 1.19.5 '@types/express-serve-static-core': 4.19.6 - '@types/qs': 6.9.16 + '@types/qs': 6.9.17 '@types/serve-static': 1.15.7 '@types/hast@2.3.10': @@ -14636,62 +14699,62 @@ snapshots: '@types/node-forge@1.3.11': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 - '@types/node@18.19.58': + '@types/node@18.19.68': dependencies: undici-types: 5.26.5 - '@types/node@22.7.8': + '@types/node@22.10.2': dependencies: - undici-types: 6.19.8 + undici-types: 6.20.0 '@types/parse-json@4.0.2': {} '@types/pbkdf2@3.1.2': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 - '@types/prismjs@1.26.4': {} + '@types/prismjs@1.26.5': {} - '@types/prop-types@15.7.13': {} + '@types/prop-types@15.7.14': {} - '@types/qs@6.9.16': {} + '@types/qs@6.9.17': {} '@types/range-parser@1.2.7': {} - '@types/react-dom@18.3.1': + '@types/react-dom@18.3.5(@types/react@18.3.16)': dependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - '@types/react@18.3.12': + '@types/react@18.3.16': dependencies: - '@types/prop-types': 15.7.13 + '@types/prop-types': 15.7.14 csstype: 3.1.3 '@types/readable-stream@2.3.15': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 safe-buffer: 5.1.2 '@types/secp256k1@4.0.6': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/semver@7.5.8': {} '@types/send@0.17.4': dependencies: '@types/mime': 1.3.5 - '@types/node': 22.7.8 + '@types/node': 22.10.2 '@types/serve-static@1.15.7': dependencies: '@types/http-errors': 2.0.4 - '@types/node': 22.7.8 + 
'@types/node': 22.10.2 '@types/send': 0.17.4 - '@types/styled-system@5.1.22': + '@types/styled-system@5.1.23': dependencies: csstype: 3.1.3 @@ -14703,40 +14766,40 @@ snapshots: '@types/validator@13.12.2': {} - '@types/ws@8.5.12': + '@types/ws@8.5.13': dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 - '@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3)': + '@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint@8.57.1)(typescript@5.7.2)': dependencies: - '@eslint-community/regexpp': 4.11.1 - '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.6.3) + '@eslint-community/regexpp': 4.12.1 + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.7.2) '@typescript-eslint/scope-manager': 6.21.0 - '@typescript-eslint/type-utils': 6.21.0(eslint@8.57.1)(typescript@5.6.3) - '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/type-utils': 6.21.0(eslint@8.57.1)(typescript@5.7.2) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.7.2) '@typescript-eslint/visitor-keys': 6.21.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) eslint: 8.57.1 graphemer: 1.4.0 ignore: 5.3.2 natural-compare: 1.4.0 semver: 7.6.3 - ts-api-utils: 1.3.0(typescript@5.6.3) + ts-api-utils: 1.4.3(typescript@5.7.2) optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3)': + '@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2)': dependencies: '@typescript-eslint/scope-manager': 6.21.0 '@typescript-eslint/types': 6.21.0 - '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.6.3) + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.7.2) '@typescript-eslint/visitor-keys': 6.21.0 - debug: 
4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) eslint: 8.57.1 optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - supports-color @@ -14745,43 +14808,43 @@ snapshots: '@typescript-eslint/types': 6.21.0 '@typescript-eslint/visitor-keys': 6.21.0 - '@typescript-eslint/type-utils@6.21.0(eslint@8.57.1)(typescript@5.6.3)': + '@typescript-eslint/type-utils@6.21.0(eslint@8.57.1)(typescript@5.7.2)': dependencies: - '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.6.3) - '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.6.3) - debug: 4.3.7(supports-color@8.1.1) + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.7.2) + '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.7.2) + debug: 4.4.0(supports-color@8.1.1) eslint: 8.57.1 - ts-api-utils: 1.3.0(typescript@5.6.3) + ts-api-utils: 1.4.3(typescript@5.7.2) optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - supports-color '@typescript-eslint/types@6.21.0': {} - '@typescript-eslint/typescript-estree@6.21.0(typescript@5.6.3)': + '@typescript-eslint/typescript-estree@6.21.0(typescript@5.7.2)': dependencies: '@typescript-eslint/types': 6.21.0 '@typescript-eslint/visitor-keys': 6.21.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 minimatch: 9.0.3 semver: 7.6.3 - ts-api-utils: 1.3.0(typescript@5.6.3) + ts-api-utils: 1.4.3(typescript@5.7.2) optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@6.21.0(eslint@8.57.1)(typescript@5.6.3)': + '@typescript-eslint/utils@6.21.0(eslint@8.57.1)(typescript@5.7.2)': dependencies: - '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.1) + '@eslint-community/eslint-utils': 4.4.1(eslint@8.57.1) '@types/json-schema': 7.0.15 '@types/semver': 7.5.8 '@typescript-eslint/scope-manager': 6.21.0 '@typescript-eslint/types': 6.21.0 - 
'@typescript-eslint/typescript-estree': 6.21.0(typescript@5.6.3) + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.7.2) eslint: 8.57.1 semver: 7.6.3 transitivePeerDependencies: @@ -14793,7 +14856,7 @@ snapshots: '@typescript-eslint/types': 6.21.0 eslint-visitor-keys: 3.4.3 - '@ungap/structured-clone@1.2.0': {} + '@ungap/structured-clone@1.2.1': {} '@uniswap/lib@4.0.1-alpha': {} @@ -14809,14 +14872,26 @@ snapshots: tiny-invariant: 1.3.3 toformat: 2.0.0 - '@uniswap/swap-router-contracts@1.3.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))': + '@uniswap/sdk-core@6.1.0': + dependencies: + '@ethersproject/address': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/strings': 5.7.0 + big.js: 5.2.2 + decimal.js-light: 2.5.1 + jsbi: 3.2.5 + tiny-invariant: 1.3.3 + toformat: 2.0.0 + + '@uniswap/swap-router-contracts@1.3.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: '@openzeppelin/contracts': 3.4.2-solc-0.7 '@uniswap/v2-core': 1.0.1 '@uniswap/v3-core': 1.0.1 '@uniswap/v3-periphery': 1.4.4 dotenv: 14.3.2 - hardhat-watcher: 2.5.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) + hardhat-watcher: 2.5.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) transitivePeerDependencies: - hardhat @@ -14834,12 +14909,12 @@ snapshots: '@uniswap/v3-core': 1.0.1 base64-sol: 1.0.1 - '@uniswap/v3-sdk@3.18.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3))': + '@uniswap/v3-sdk@3.19.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: '@ethersproject/abi': 5.7.0 '@ethersproject/solidity': 5.7.0 - '@uniswap/sdk-core': 5.9.0 - '@uniswap/swap-router-contracts': 1.3.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) + '@uniswap/sdk-core': 
6.1.0 + '@uniswap/swap-router-contracts': 1.3.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) '@uniswap/v3-periphery': 1.4.4 '@uniswap/v3-staker': 1.0.0 tiny-invariant: 1.3.3 @@ -14878,7 +14953,7 @@ snapshots: '@vitest/snapshot@1.6.0': dependencies: - magic-string: 0.30.12 + magic-string: 0.30.15 pathe: 1.1.2 pretty-format: 29.7.0 @@ -14893,76 +14968,65 @@ snapshots: loupe: 2.3.7 pretty-format: 29.7.0 - '@web3icons/common@0.4.1(typescript@5.6.3)': + '@web3icons/common@0.7.2(typescript@5.7.2)': dependencies: - typescript: 5.6.3 + typescript: 5.7.2 - '@web3icons/react@3.10.1(react@18.3.1)(typescript@5.6.3)': + '@web3icons/react@3.13.2(react@18.3.1)(typescript@5.7.2)': dependencies: - '@web3icons/common': 0.4.1(typescript@5.6.3) + '@web3icons/common': 0.7.2(typescript@5.7.2) react: 18.3.1 transitivePeerDependencies: - typescript '@whatwg-node/disposablestack@0.0.5': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 '@whatwg-node/events@0.1.2': dependencies: - tslib: 2.8.0 + tslib: 2.8.1 - '@whatwg-node/fetch@0.9.22': + '@whatwg-node/fetch@0.10.1': dependencies: - '@whatwg-node/node-fetch': 0.5.27 + '@whatwg-node/node-fetch': 0.7.5 urlpattern-polyfill: 10.0.0 - '@whatwg-node/node-fetch@0.5.27': + '@whatwg-node/fetch@0.9.23': + dependencies: + '@whatwg-node/node-fetch': 0.6.0 + urlpattern-polyfill: 10.0.0 + + '@whatwg-node/node-fetch@0.6.0': dependencies: '@kamilkisiela/fast-url-parser': 1.1.4 busboy: 1.6.0 fast-querystring: 1.1.2 - tslib: 2.8.0 + tslib: 2.8.1 - '@whatwg-node/server@0.9.50': + '@whatwg-node/node-fetch@0.7.5': dependencies: - '@whatwg-node/fetch': 0.9.22 - tslib: 2.8.0 - - '@wry/caches@1.0.1': - dependencies: - tslib: 2.8.0 - optional: true - - '@wry/context@0.7.4': - dependencies: - tslib: 2.8.0 - optional: true - - '@wry/equality@0.5.7': - dependencies: - tslib: 2.8.0 - optional: true - - '@wry/trie@0.4.3': - dependencies: - tslib: 2.8.0 - optional: true + '@kamilkisiela/fast-url-parser': 1.1.4 + 
'@whatwg-node/disposablestack': 0.0.5 + busboy: 1.6.0 + fast-querystring: 1.1.2 + tslib: 2.8.1 - '@wry/trie@0.5.0': + '@whatwg-node/server@0.9.63': dependencies: - tslib: 2.8.0 - optional: true + '@whatwg-node/disposablestack': 0.0.5 + '@whatwg-node/fetch': 0.10.1 + tslib: 2.8.1 '@xobotyi/scrollbar-width@1.9.5': {} '@xstate/fsm@1.6.5': {} - '@xstate/react@3.2.2(@types/react@18.3.12)(react@18.3.1)(xstate@4.38.3)': + '@xstate/react@3.2.2(@types/react@18.3.16)(react@18.3.1)(xstate@4.38.3)': dependencies: react: 18.3.1 - use-isomorphic-layout-effect: 1.1.2(@types/react@18.3.12)(react@18.3.1) - use-sync-external-store: 1.2.2(react@18.3.1) + use-isomorphic-layout-effect: 1.2.0(@types/react@18.3.16)(react@18.3.1) + use-sync-external-store: 1.4.0(react@18.3.1) optionalDependencies: xstate: 4.38.3 transitivePeerDependencies: @@ -14992,15 +15056,15 @@ snapshots: mime-types: 2.1.35 negotiator: 0.6.3 - acorn-jsx@5.3.2(acorn@8.13.0): + acorn-jsx@5.3.2(acorn@8.14.0): dependencies: - acorn: 8.13.0 + acorn: 8.14.0 acorn-walk@8.3.4: dependencies: - acorn: 8.13.0 + acorn: 8.14.0 - acorn@8.13.0: {} + acorn@8.14.0: {} adm-zip@0.4.16: {} @@ -15008,15 +15072,11 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color - agent-base@7.1.1: - dependencies: - debug: 4.3.7(supports-color@8.1.1) - transitivePeerDependencies: - - supports-color + agent-base@7.1.3: {} aggregate-error@3.1.0: dependencies: @@ -15041,21 +15101,21 @@ snapshots: json-schema-traverse: 1.0.0 require-from-string: 2.0.2 - algoliasearch@5.14.2: - dependencies: - '@algolia/client-abtesting': 5.14.2 - '@algolia/client-analytics': 5.14.2 - '@algolia/client-common': 5.14.2 - '@algolia/client-insights': 5.14.2 - '@algolia/client-personalization': 5.14.2 - '@algolia/client-query-suggestions': 5.14.2 - '@algolia/client-search': 5.14.2 - '@algolia/ingestion': 1.14.2 - '@algolia/monitoring': 1.14.2 - '@algolia/recommend': 
5.14.2 - '@algolia/requester-browser-xhr': 5.14.2 - '@algolia/requester-fetch': 5.14.2 - '@algolia/requester-node-http': 5.14.2 + algoliasearch@5.17.1: + dependencies: + '@algolia/client-abtesting': 5.17.1 + '@algolia/client-analytics': 5.17.1 + '@algolia/client-common': 5.17.1 + '@algolia/client-insights': 5.17.1 + '@algolia/client-personalization': 5.17.1 + '@algolia/client-query-suggestions': 5.17.1 + '@algolia/client-search': 5.17.1 + '@algolia/ingestion': 1.17.1 + '@algolia/monitoring': 1.17.1 + '@algolia/recommend': 5.17.1 + '@algolia/requester-browser-xhr': 5.17.1 + '@algolia/requester-fetch': 5.17.1 + '@algolia/requester-node-http': 5.17.1 ansi-align@3.0.1: dependencies: @@ -15122,76 +15182,76 @@ snapshots: aria-hidden@1.2.4: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 aria-query@5.3.2: {} array-buffer-byte-length@1.0.1: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 is-array-buffer: 3.0.4 array-flatten@1.1.1: {} array-includes@3.1.8: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-object-atoms: 1.0.0 - get-intrinsic: 1.2.4 - is-string: 1.0.7 + get-intrinsic: 1.2.6 + is-string: 1.1.0 array-union@2.1.0: {} array.prototype.findlast@1.2.5: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-object-atoms: 1.0.0 es-shim-unscopables: 1.0.2 array.prototype.findlastindex@1.2.5: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-object-atoms: 1.0.0 es-shim-unscopables: 1.0.2 array.prototype.flat@1.3.2: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-shim-unscopables: 1.0.2 array.prototype.flatmap@1.3.2: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 
es-shim-unscopables: 1.0.2 array.prototype.tosorted@1.1.4: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-shim-unscopables: 1.0.2 arraybuffer.prototype.slice@1.0.3: dependencies: array-buffer-byte-length: 1.0.1 - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 is-array-buffer: 3.0.4 is-shared-array-buffer: 1.0.3 @@ -15207,26 +15267,26 @@ snapshots: ast-types@0.16.1: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 astring@1.9.0: {} async-mutex@0.4.1: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 asynckit@0.4.0: {} atomic-sleep@1.0.0: {} - attr-accept@2.2.4: {} + attr-accept@2.2.5: {} auto-bind@4.0.0: {} autoprefixer@10.4.20(postcss@8.4.49): dependencies: - browserslist: 4.24.2 - caniuse-lite: 1.0.30001669 + browserslist: 4.24.3 + caniuse-lite: 1.0.30001688 fraction.js: 4.3.7 normalize-range: 0.1.2 picocolors: 1.1.1 @@ -15237,55 +15297,47 @@ snapshots: dependencies: possible-typed-array-names: 1.0.0 - axe-core@4.10.1: {} - - axios@1.7.7: - dependencies: - follow-redirects: 1.15.9(debug@4.3.7) - form-data: 4.0.1 - proxy-from-env: 1.1.0 - transitivePeerDependencies: - - debug + axe-core@4.10.2: {} axobject-query@4.1.0: {} babel-plugin-macros@3.1.0: dependencies: - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 cosmiconfig: 7.1.0 - resolve: 1.22.8 + resolve: 1.22.9 babel-plugin-syntax-trailing-function-commas@7.0.0-beta.0: {} - babel-preset-fbjs@3.4.0(@babel/core@7.25.9): - dependencies: - '@babel/core': 7.25.9 - '@babel/plugin-proposal-class-properties': 7.18.6(@babel/core@7.25.9) - '@babel/plugin-proposal-object-rest-spread': 7.20.7(@babel/core@7.25.9) - '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.25.9) - '@babel/plugin-syntax-flow': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.25.9) - 
'@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.25.9) - '@babel/plugin-transform-arrow-functions': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-block-scoped-functions': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-block-scoping': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-classes': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-computed-properties': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-destructuring': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-flow-strip-types': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-for-of': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-function-name': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-literals': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-member-expression-literals': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-modules-commonjs': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-object-super': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-property-literals': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-react-display-name': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-react-jsx': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-shorthand-properties': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-spread': 7.25.9(@babel/core@7.25.9) - '@babel/plugin-transform-template-literals': 7.25.9(@babel/core@7.25.9) + babel-preset-fbjs@3.4.0(@babel/core@7.26.0): + dependencies: + '@babel/core': 7.26.0 + '@babel/plugin-proposal-class-properties': 7.18.6(@babel/core@7.26.0) + '@babel/plugin-proposal-object-rest-spread': 7.20.7(@babel/core@7.26.0) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.26.0) + '@babel/plugin-syntax-flow': 7.26.0(@babel/core@7.26.0) + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.26.0) + 
'@babel/plugin-transform-arrow-functions': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-block-scoped-functions': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-block-scoping': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-classes': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-computed-properties': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-destructuring': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-flow-strip-types': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-for-of': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-function-name': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-member-expression-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-commonjs': 7.26.3(@babel/core@7.26.0) + '@babel/plugin-transform-object-super': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-property-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-display-name': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-shorthand-properties': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-spread': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-template-literals': 7.25.9(@babel/core@7.26.0) babel-plugin-syntax-trailing-function-commas: 7.0.0-beta.0 transitivePeerDependencies: - supports-color @@ -15332,7 +15384,7 @@ snapshots: blakejs@1.2.1: {} - bn.js@4.12.0: {} + bn.js@4.12.1: {} bn.js@5.2.1: {} @@ -15408,18 +15460,18 @@ snapshots: browserify-aes@1.2.0: dependencies: buffer-xor: 1.0.3 - cipher-base: 1.0.4 + cipher-base: 1.0.6 create-hash: 1.2.0 evp_bytestokey: 1.0.3 inherits: 2.0.4 safe-buffer: 5.2.1 - browserslist@4.24.2: + browserslist@4.24.3: dependencies: - caniuse-lite: 1.0.30001669 - electron-to-chromium: 1.5.42 - node-releases: 2.0.18 - 
update-browserslist-db: 1.1.1(browserslist@4.24.2) + caniuse-lite: 1.0.30001688 + electron-to-chromium: 1.5.73 + node-releases: 2.0.19 + update-browserslist-db: 1.1.1(browserslist@4.24.3) bs58@4.0.1: dependencies: @@ -15484,20 +15536,29 @@ snapshots: unique-filename: 3.0.0 optional: true - call-bind@1.0.7: + call-bind-apply-helpers@1.0.1: dependencies: - es-define-property: 1.0.0 es-errors: 1.3.0 function-bind: 1.1.2 - get-intrinsic: 1.2.4 + + call-bind@1.0.8: + dependencies: + call-bind-apply-helpers: 1.0.1 + es-define-property: 1.0.1 + get-intrinsic: 1.2.6 set-function-length: 1.2.2 + call-bound@1.0.2: + dependencies: + call-bind: 1.0.8 + get-intrinsic: 1.2.6 + callsites@3.1.0: {} camel-case@4.1.2: dependencies: pascal-case: 3.1.2 - tslib: 2.8.0 + tslib: 2.8.1 camelcase-css@2.0.1: {} @@ -15507,18 +15568,18 @@ snapshots: camelize@1.0.1: {} - caniuse-lite@1.0.30001669: {} + caniuse-lite@1.0.30001688: {} capital-case@1.0.4: dependencies: no-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 upper-case-first: 2.0.2 capnp-ts@0.7.0: dependencies: - debug: 4.3.7(supports-color@8.1.1) - tslib: 2.8.0 + debug: 4.4.0(supports-color@8.1.1) + tslib: 2.8.1 transitivePeerDependencies: - supports-color @@ -15594,7 +15655,7 @@ snapshots: path-case: 3.0.4 sentence-case: 3.0.4 snake-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 character-entities-html4@2.1.0: {} @@ -15639,9 +15700,9 @@ snapshots: ci-info@2.0.0: {} - ci-info@4.0.0: {} + ci-info@4.1.0: {} - cipher-base@1.0.4: + cipher-base@1.0.6: dependencies: inherits: 2.0.4 safe-buffer: 5.2.1 @@ -15652,7 +15713,7 @@ snapshots: catering: 2.1.1 module-error: 1.0.2 napi-macros: 2.2.2 - node-gyp-build: 4.8.2 + node-gyp-build: 4.8.4 classnames@2.5.1: {} @@ -15769,7 +15830,7 @@ snapshots: constant-case@3.0.4: dependencies: no-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 upper-case: 2.0.2 content-disposition@0.5.4: @@ -15811,20 +15872,20 @@ snapshots: path-type: 4.0.0 yaml: 1.10.2 - cosmiconfig@9.0.0(typescript@5.6.3): + 
cosmiconfig@9.0.0(typescript@5.7.2): dependencies: env-paths: 2.2.1 import-fresh: 3.3.0 js-yaml: 4.1.0 parse-json: 5.2.0 optionalDependencies: - typescript: 5.6.3 + typescript: 5.7.2 crc-32@1.2.2: {} create-hash@1.2.0: dependencies: - cipher-base: 1.0.4 + cipher-base: 1.0.6 inherits: 2.0.4 md5.js: 1.3.5 ripemd160: 2.0.2 @@ -15832,7 +15893,7 @@ snapshots: create-hmac@1.1.7: dependencies: - cipher-base: 1.0.4 + cipher-base: 1.0.6 create-hash: 1.2.0 inherits: 2.0.4 ripemd160: 2.0.2 @@ -15855,7 +15916,7 @@ snapshots: cross-inspect@1.0.1: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 cross-spawn@5.1.0: dependencies: @@ -15863,7 +15924,7 @@ snapshots: shebang-command: 1.2.0 which: 1.3.1 - cross-spawn@7.0.3: + cross-spawn@7.0.6: dependencies: path-key: 3.1.1 shebang-command: 2.0.0 @@ -15900,12 +15961,12 @@ snapshots: csstype@3.1.3: {} - cytoscape-cose-bilkent@4.1.0(cytoscape@3.30.2): + cytoscape-cose-bilkent@4.1.0(cytoscape@3.30.4): dependencies: cose-base: 1.0.3 - cytoscape: 3.30.2 + cytoscape: 3.30.4 - cytoscape@3.30.2: {} + cytoscape@3.30.4: {} d3-array@2.12.1: dependencies: @@ -16086,27 +16147,27 @@ snapshots: data-urls@5.0.0: dependencies: whatwg-mimetype: 4.0.0 - whatwg-url: 14.0.0 + whatwg-url: 14.1.0 data-view-buffer@1.0.1: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 es-errors: 1.3.0 - is-data-view: 1.0.1 + is-data-view: 1.0.2 data-view-byte-length@1.0.1: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 es-errors: 1.3.0 - is-data-view: 1.0.1 + is-data-view: 1.0.2 data-view-byte-offset@1.0.0: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 es-errors: 1.3.0 - is-data-view: 1.0.1 + is-data-view: 1.0.2 - dataloader@2.2.2: {} + dataloader@2.2.3: {} date-fns@4.1.0: {} @@ -16120,7 +16181,7 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.3.7(supports-color@8.1.1): + debug@4.4.0(supports-color@8.1.1): dependencies: ms: 2.1.3 optionalDependencies: @@ -16152,9 +16213,9 @@ snapshots: define-data-property@1.1.4: dependencies: - es-define-property: 1.0.0 + 
es-define-property: 1.0.1 es-errors: 1.3.0 - gopd: 1.0.1 + gopd: 1.2.0 define-properties@1.2.1: dependencies: @@ -16204,18 +16265,19 @@ snapshots: dependencies: path-type: 4.0.0 - disposablestack@1.1.6: + disposablestack@1.1.7: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 + call-bound: 1.0.2 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-set-tostringtag: 2.0.3 - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 globalthis: 1.0.4 - has-symbols: 1.0.3 + has-symbols: 1.1.0 hasown: 2.0.2 - internal-slot: 1.0.7 + internal-slot: 1.1.0 suppressed-error: 1.0.3 dlv@1.1.3: {} @@ -16230,7 +16292,7 @@ snapshots: dom-helpers@5.2.1: dependencies: - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 csstype: 3.1.3 dompurify@3.1.6: {} @@ -16238,16 +16300,22 @@ snapshots: dot-case@3.0.4: dependencies: no-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 dotenv@14.3.2: {} - dotenv@16.4.5: {} + dotenv@16.4.7: {} dottie@2.0.6: {} dset@3.1.4: {} + dunder-proto@1.0.0: + dependencies: + call-bind-apply-helpers: 1.0.1 + es-errors: 1.3.0 + gopd: 1.2.0 + duplexify@4.1.3: dependencies: end-of-stream: 1.4.4 @@ -16259,13 +16327,13 @@ snapshots: ee-first@1.1.1: {} - electron-to-chromium@1.5.42: {} + electron-to-chromium@1.5.73: {} elkjs@0.9.3: {} elliptic@6.5.4: dependencies: - bn.js: 4.12.0 + bn.js: 4.12.1 brorand: 1.1.0 hash.js: 1.1.7 hmac-drbg: 1.0.1 @@ -16273,9 +16341,9 @@ snapshots: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 - elliptic@6.5.7: + elliptic@6.6.1: dependencies: - bn.js: 4.12.0 + bn.js: 4.12.1 brorand: 1.1.0 hash.js: 1.1.7 hmac-drbg: 1.0.1 @@ -16283,6 +16351,8 @@ snapshots: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + emoji-regex-xs@1.0.0: {} + emoji-regex@10.4.0: {} emoji-regex@8.0.0: {} @@ -16328,77 +16398,76 @@ snapshots: dependencies: stackframe: 1.3.4 - es-abstract@1.23.3: + es-abstract@1.23.5: dependencies: array-buffer-byte-length: 1.0.1 arraybuffer.prototype.slice: 1.0.3 available-typed-arrays: 1.0.7 
- call-bind: 1.0.7 + call-bind: 1.0.8 data-view-buffer: 1.0.1 data-view-byte-length: 1.0.1 data-view-byte-offset: 1.0.0 - es-define-property: 1.0.0 + es-define-property: 1.0.1 es-errors: 1.3.0 es-object-atoms: 1.0.0 es-set-tostringtag: 2.0.3 - es-to-primitive: 1.2.1 + es-to-primitive: 1.3.0 function.prototype.name: 1.1.6 - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 get-symbol-description: 1.0.2 globalthis: 1.0.4 - gopd: 1.0.1 + gopd: 1.2.0 has-property-descriptors: 1.0.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 + has-proto: 1.2.0 + has-symbols: 1.1.0 hasown: 2.0.2 - internal-slot: 1.0.7 + internal-slot: 1.1.0 is-array-buffer: 3.0.4 is-callable: 1.2.7 - is-data-view: 1.0.1 + is-data-view: 1.0.2 is-negative-zero: 2.0.3 - is-regex: 1.1.4 + is-regex: 1.2.1 is-shared-array-buffer: 1.0.3 - is-string: 1.0.7 + is-string: 1.1.0 is-typed-array: 1.1.13 - is-weakref: 1.0.2 - object-inspect: 1.13.2 + is-weakref: 1.1.0 + object-inspect: 1.13.3 object-keys: 1.1.1 object.assign: 4.1.5 regexp.prototype.flags: 1.5.3 - safe-array-concat: 1.1.2 - safe-regex-test: 1.0.3 - string.prototype.trim: 1.2.9 - string.prototype.trimend: 1.0.8 + safe-array-concat: 1.1.3 + safe-regex-test: 1.1.0 + string.prototype.trim: 1.2.10 + string.prototype.trimend: 1.0.9 string.prototype.trimstart: 1.0.8 typed-array-buffer: 1.0.2 typed-array-byte-length: 1.0.1 - typed-array-byte-offset: 1.0.2 - typed-array-length: 1.0.6 + typed-array-byte-offset: 1.0.3 + typed-array-length: 1.0.7 unbox-primitive: 1.0.2 - which-typed-array: 1.1.15 + which-typed-array: 1.1.16 - es-define-property@1.0.0: - dependencies: - get-intrinsic: 1.2.4 + es-define-property@1.0.1: {} es-errors@1.3.0: {} - es-iterator-helpers@1.1.0: + es-iterator-helpers@1.2.0: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-set-tostringtag: 2.0.3 function-bind: 1.1.2 - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 globalthis: 1.0.4 + gopd: 1.2.0 
has-property-descriptors: 1.0.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 - internal-slot: 1.0.7 - iterator.prototype: 1.1.3 - safe-array-concat: 1.1.2 + has-proto: 1.2.0 + has-symbols: 1.1.0 + internal-slot: 1.1.0 + iterator.prototype: 1.1.4 + safe-array-concat: 1.1.3 es-object-atoms@1.0.0: dependencies: @@ -16406,7 +16475,7 @@ snapshots: es-set-tostringtag@2.0.3: dependencies: - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 has-tostringtag: 1.0.2 hasown: 2.0.2 @@ -16414,11 +16483,11 @@ snapshots: dependencies: hasown: 2.0.2 - es-to-primitive@1.2.1: + es-to-primitive@1.3.0: dependencies: is-callable: 1.2.7 - is-date-object: 1.0.5 - is-symbol: 1.0.4 + is-date-object: 1.1.0 + is-symbol: 1.1.1 esbuild@0.17.19: optionalDependencies: @@ -16538,34 +16607,31 @@ snapshots: eslint-import-resolver-node@0.3.9: dependencies: debug: 3.2.7 - is-core-module: 2.15.1 - resolve: 1.22.8 + is-core-module: 2.16.0 + resolve: 1.22.9 transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.6.3(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1): + eslint-import-resolver-typescript@3.7.0(eslint-plugin-import@2.31.0)(eslint@8.57.1): dependencies: '@nolyfill/is-core-module': 1.0.39 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) enhanced-resolve: 5.17.1 eslint: 8.57.1 - eslint-module-utils: 2.12.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) fast-glob: 3.3.2 get-tsconfig: 4.8.1 - is-bun-module: 1.2.1 + is-bun-module: 1.3.0 is-glob: 4.0.3 + stable-hash: 0.0.4 optionalDependencies: - eslint-plugin-import: 2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + eslint-plugin-import: 
2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint-import-resolver-typescript@3.7.0)(eslint@8.57.1) transitivePeerDependencies: - - '@typescript-eslint/parser' - - eslint-import-resolver-node - - eslint-import-resolver-webpack - supports-color eslint-mdx@2.3.4(eslint@8.57.1): dependencies: - acorn: 8.13.0 - acorn-jsx: 5.3.2(acorn@8.13.0) + acorn: 8.14.0 + acorn-jsx: 5.3.2(acorn@8.14.0) eslint: 8.57.1 espree: 9.6.1 estree-util-visit: 1.2.1 @@ -16573,7 +16639,7 @@ snapshots: remark-parse: 10.0.2 remark-stringify: 10.0.3 synckit: 0.9.2 - tslib: 2.8.0 + tslib: 2.8.1 unified: 10.1.2 unified-engine: 10.1.0 unist-util-visit: 4.1.2 @@ -16582,18 +16648,18 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1): + eslint-module-utils@2.12.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@8.57.1): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.7.2) eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.6.3(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1) + eslint-import-resolver-typescript: 3.7.0(eslint-plugin-import@2.31.0)(eslint@8.57.1) transitivePeerDependencies: - supports-color - eslint-plugin-import@2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1): + eslint-plugin-import@2.31.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint-import-resolver-typescript@3.7.0)(eslint@8.57.1): 
dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.8 @@ -16604,42 +16670,41 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.1 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + eslint-module-utils: 2.12.0(@typescript-eslint/parser@6.21.0(eslint@8.57.1)(typescript@5.7.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@8.57.1) hasown: 2.0.2 - is-core-module: 2.15.1 + is-core-module: 2.16.0 is-glob: 4.0.3 minimatch: 3.1.2 object.fromentries: 2.0.8 object.groupby: 1.0.3 object.values: 1.2.0 semver: 6.3.1 - string.prototype.trimend: 1.0.8 + string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 6.21.0(eslint@8.57.1)(typescript@5.7.2) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack - supports-color - eslint-plugin-jsx-a11y@6.10.1(eslint@8.57.1): + eslint-plugin-jsx-a11y@6.10.2(eslint@8.57.1): dependencies: aria-query: 5.3.2 array-includes: 3.1.8 array.prototype.flatmap: 1.3.2 ast-types-flow: 0.0.8 - axe-core: 4.10.1 + axe-core: 4.10.2 axobject-query: 4.1.0 damerau-levenshtein: 1.0.8 emoji-regex: 9.2.2 - es-iterator-helpers: 1.1.0 eslint: 8.57.1 hasown: 2.0.2 jsx-ast-utils: 3.3.5 language-tags: 1.0.9 minimatch: 3.1.2 object.fromentries: 2.0.8 - safe-regex-test: 1.0.3 + safe-regex-test: 1.1.0 string.prototype.includes: 2.0.1 eslint-plugin-markdown@3.0.1(eslint@8.57.1): @@ -16657,7 +16722,7 @@ snapshots: remark-mdx: 2.3.0 remark-parse: 10.0.2 remark-stringify: 10.0.3 - tslib: 2.8.0 + tslib: 2.8.1 unified: 10.1.2 vfile: 5.3.7 transitivePeerDependencies: @@ -16674,7 +16739,7 @@ snapshots: array.prototype.flatmap: 1.3.2 array.prototype.tosorted: 1.1.4 doctrine: 2.1.0 - es-iterator-helpers: 1.1.0 + 
es-iterator-helpers: 1.2.0 eslint: 8.57.1 estraverse: 5.3.0 hasown: 2.0.2 @@ -16706,18 +16771,18 @@ snapshots: eslint@8.57.1: dependencies: - '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.1) - '@eslint-community/regexpp': 4.11.1 + '@eslint-community/eslint-utils': 4.4.1(eslint@8.57.1) + '@eslint-community/regexpp': 4.12.1 '@eslint/eslintrc': 2.1.4 '@eslint/js': 8.57.1 '@humanwhocodes/config-array': 0.13.0 '@humanwhocodes/module-importer': 1.0.1 '@nodelib/fs.walk': 1.2.8 - '@ungap/structured-clone': 1.2.0 + '@ungap/structured-clone': 1.2.1 ajv: 6.12.6 chalk: 4.1.2 - cross-spawn: 7.0.3 - debug: 4.3.7(supports-color@8.1.1) + cross-spawn: 7.0.6 + debug: 4.4.0(supports-color@8.1.1) doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.2.2 @@ -16749,8 +16814,8 @@ snapshots: espree@9.6.1: dependencies: - acorn: 8.13.0 - acorn-jsx: 5.3.2(acorn@8.13.0) + acorn: 8.14.0 + acorn-jsx: 5.3.2(acorn@8.14.0) eslint-visitor-keys: 3.4.3 esprima@4.0.1: {} @@ -16829,15 +16894,15 @@ snapshots: ethereumjs-abi@0.6.8: dependencies: - bn.js: 4.12.0 + bn.js: 4.12.1 ethereumjs-util: 6.2.1 ethereumjs-util@6.2.1: dependencies: '@types/bn.js': 4.11.6 - bn.js: 4.12.0 + bn.js: 4.12.1 create-hash: 1.2.0 - elliptic: 6.5.7 + elliptic: 6.6.1 ethereum-cryptography: 0.1.3 ethjs-util: 0.1.6 rlp: 2.2.7 @@ -16948,7 +17013,7 @@ snapshots: execa@8.0.1: dependencies: - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 get-stream: 8.0.1 human-signals: 5.0.0 is-stream: 3.0.0 @@ -17115,7 +17180,7 @@ snapshots: file-selector@0.6.0: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 file-system-cache@2.3.0: dependencies: @@ -17156,17 +17221,17 @@ snapshots: flat-cache@3.2.0: dependencies: - flatted: 3.3.1 + flatted: 3.3.2 keyv: 4.5.4 rimraf: 3.0.2 flat@5.0.2: {} - flatted@3.3.1: {} + flatted@3.3.2: {} - follow-redirects@1.15.9(debug@4.3.7): + follow-redirects@1.15.9(debug@4.4.0): optionalDependencies: - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) for-each@0.3.3: dependencies: @@ -17176,7 
+17241,7 @@ snapshots: foreground-child@3.3.0: dependencies: - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 signal-exit: 4.1.0 form-data@4.0.1: @@ -17193,9 +17258,11 @@ snapshots: fraction.js@4.3.7: {} - framer-motion@11.11.9(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + framer-motion@11.14.4(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - tslib: 2.8.0 + motion-dom: 11.14.3 + motion-utils: 11.14.3 + tslib: 2.8.1 optionalDependencies: '@emotion/is-prop-valid': 0.8.8 react: 18.3.1 @@ -17248,9 +17315,9 @@ snapshots: function.prototype.name@1.1.6: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 functions-have-names: 1.2.3 functional-red-black-tree@1.0.1: {} @@ -17292,13 +17359,18 @@ snapshots: get-func-name@2.0.2: {} - get-intrinsic@1.2.4: + get-intrinsic@1.2.6: dependencies: + call-bind-apply-helpers: 1.0.1 + dunder-proto: 1.0.0 + es-define-property: 1.0.1 es-errors: 1.3.0 + es-object-atoms: 1.0.0 function-bind: 1.1.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 + gopd: 1.2.0 + has-symbols: 1.1.0 hasown: 2.0.2 + math-intrinsics: 1.0.0 get-nonce@1.0.1: {} @@ -17315,9 +17387,9 @@ snapshots: get-symbol-description@1.0.2: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 es-errors: 1.3.0 - get-intrinsic: 1.2.4 + get-intrinsic: 1.2.6 get-tsconfig@4.8.1: dependencies: @@ -17388,7 +17460,7 @@ snapshots: globalthis@1.0.4: dependencies: define-properties: 1.2.1 - gopd: 1.0.1 + gopd: 1.2.0 globby@11.1.0: dependencies: @@ -17401,9 +17473,7 @@ snapshots: glur@1.1.2: {} - gopd@1.0.1: - dependencies: - get-intrinsic: 1.2.4 + gopd@1.2.0: {} graceful-fs@4.2.11: {} @@ -17429,16 +17499,6 @@ snapshots: lodash.merge: 4.6.2 lodash.mergewith: 4.6.2 - graphql-jit@0.8.6(graphql@16.9.0): - dependencies: - '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) - fast-json-stringify: 5.16.1 - generate-function: 2.3.1 - graphql: 16.9.0 - 
lodash.memoize: 4.1.2 - lodash.merge: 4.6.2 - lodash.mergewith: 4.6.2 - graphql-jit@0.8.7(graphql@16.9.0): dependencies: '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) @@ -17452,31 +17512,31 @@ snapshots: graphql-tag@2.12.6(graphql@16.8.0): dependencies: graphql: 16.8.0 - tslib: 2.8.0 + tslib: 2.8.1 graphql-tag@2.12.6(graphql@16.9.0): dependencies: graphql: 16.9.0 - tslib: 2.8.0 + tslib: 2.8.1 graphql-ws@5.16.0(graphql@16.9.0): dependencies: graphql: 16.9.0 - graphql-yoga@5.7.0(graphql@16.9.0): + graphql-yoga@5.10.5(graphql@16.9.0): dependencies: '@envelop/core': 5.0.2 - '@graphql-tools/executor': 1.3.2(graphql@16.9.0) - '@graphql-tools/schema': 10.0.7(graphql@16.9.0) - '@graphql-tools/utils': 10.5.5(graphql@16.9.0) + '@graphql-tools/executor': 1.3.8(graphql@16.9.0) + '@graphql-tools/schema': 10.0.12(graphql@16.9.0) + '@graphql-tools/utils': 10.6.3(graphql@16.9.0) '@graphql-yoga/logger': 2.0.0 - '@graphql-yoga/subscription': 5.0.1 - '@whatwg-node/fetch': 0.9.22 - '@whatwg-node/server': 0.9.50 + '@graphql-yoga/subscription': 5.0.2 + '@whatwg-node/fetch': 0.10.1 + '@whatwg-node/server': 0.9.63 dset: 3.1.4 graphql: 16.9.0 lru-cache: 10.4.3 - tslib: 2.8.0 + tslib: 2.8.1 graphql@16.8.0: {} @@ -17489,24 +17549,24 @@ snapshots: section-matter: 1.0.0 strip-bom-string: 1.0.0 - hardhat-secure-accounts@0.0.6(@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)))(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)): + hardhat-secure-accounts@0.0.6(@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)))(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)): dependencies: - '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)) - 
debug: 4.3.7(supports-color@8.1.1) + '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + debug: 4.4.0(supports-color@8.1.1) enquirer: 2.4.1 ethers: 5.7.2 - hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) lodash.clonedeep: 4.5.0 prompt-sync: 4.2.0 transitivePeerDependencies: - supports-color - hardhat-watcher@2.5.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3)): + hardhat-watcher@2.5.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)): dependencies: chokidar: 3.6.0 - hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3) + hardhat: 2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) - hardhat@2.14.1(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3))(typescript@5.6.3): + hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2): dependencies: '@ethersproject/abi': 5.7.0 '@metamask/eth-sig-util': 4.0.1 @@ -17531,7 +17591,7 @@ snapshots: chalk: 2.4.2 chokidar: 3.6.0 ci-info: 2.0.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) enquirer: 2.4.1 env-paths: 2.2.1 ethereum-cryptography: 1.2.0 @@ -17545,13 +17605,13 @@ snapshots: keccak: 3.0.4 lodash: 4.17.21 mnemonist: 0.38.5 - mocha: 10.7.3 + mocha: 10.8.2 p-map: 4.0.0 - qs: 6.13.0 + qs: 6.13.1 raw-body: 2.5.2 resolve: 1.17.0 semver: 6.3.1 - solc: 0.7.3(debug@4.3.7) + solc: 0.7.3(debug@4.4.0) source-map-support: 0.5.21 stacktrace-parser: 0.1.10 tsort: 0.0.1 @@ -17559,8 +17619,8 @@ snapshots: uuid: 8.3.2 ws: 7.5.10 optionalDependencies: - ts-node: 10.9.2(@types/node@22.7.8)(typescript@5.6.3) - typescript: 5.6.3 + ts-node: 10.9.2(@types/node@22.10.2)(typescript@5.7.2) + typescript: 5.7.2 
transitivePeerDependencies: - bufferutil - supports-color @@ -17576,15 +17636,17 @@ snapshots: has-property-descriptors@1.0.2: dependencies: - es-define-property: 1.0.0 + es-define-property: 1.0.1 - has-proto@1.0.3: {} + has-proto@1.2.0: + dependencies: + dunder-proto: 1.0.0 - has-symbols@1.0.3: {} + has-symbols@1.1.0: {} has-tostringtag@1.0.2: dependencies: - has-symbols: 1.0.3 + has-symbols: 1.1.0 has-unicode@2.0.1: optional: true @@ -17610,16 +17672,16 @@ snapshots: dependencies: function-bind: 1.1.2 - hast-util-from-dom@5.0.0: + hast-util-from-dom@5.0.1: dependencies: '@types/hast': 3.0.4 - hastscript: 8.0.0 + hastscript: 9.0.0 web-namespaces: 2.0.1 hast-util-from-html-isomorphic@2.0.0: dependencies: '@types/hast': 3.0.4 - hast-util-from-dom: 5.0.0 + hast-util-from-dom: 5.0.1 hast-util-from-html: 2.0.3 unist-util-remove-position: 5.0.0 @@ -17627,17 +17689,17 @@ snapshots: dependencies: '@types/hast': 3.0.4 devlop: 1.1.0 - hast-util-from-parse5: 8.0.1 - parse5: 7.2.0 + hast-util-from-parse5: 8.0.2 + parse5: 7.2.1 vfile: 6.0.3 vfile-message: 4.0.2 - hast-util-from-parse5@8.0.1: + hast-util-from-parse5@8.0.2: dependencies: '@types/hast': 3.0.4 '@types/unist': 3.0.3 devlop: 1.1.0 - hastscript: 8.0.0 + hastscript: 9.0.0 property-information: 6.5.0 vfile: 6.0.3 vfile-location: 5.0.3 @@ -17651,16 +17713,16 @@ snapshots: dependencies: '@types/hast': 3.0.4 - hast-util-raw@9.0.4: + hast-util-raw@9.1.0: dependencies: '@types/hast': 3.0.4 '@types/unist': 3.0.3 - '@ungap/structured-clone': 1.2.0 - hast-util-from-parse5: 8.0.1 + '@ungap/structured-clone': 1.2.1 + hast-util-from-parse5: 8.0.2 hast-util-to-parse5: 8.0.0 html-void-elements: 3.0.0 mdast-util-to-hast: 13.2.0 - parse5: 7.2.0 + parse5: 7.2.1 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 vfile: 6.0.3 @@ -17687,7 +17749,7 @@ snapshots: transitivePeerDependencies: - supports-color - hast-util-to-html@9.0.3: + hast-util-to-html@9.0.4: dependencies: '@types/hast': 3.0.4 '@types/unist': 3.0.3 @@ -17724,7 +17786,7 @@ 
snapshots: dependencies: '@types/hast': 3.0.4 - hastscript@8.0.0: + hastscript@9.0.0: dependencies: '@types/hast': 3.0.4 comma-separated-tokens: 2.0.3 @@ -17737,7 +17799,7 @@ snapshots: header-case@2.0.4: dependencies: capital-case: 1.0.4 - tslib: 2.8.0 + tslib: 2.8.1 helmet@7.0.0: {} @@ -17772,22 +17834,22 @@ snapshots: http-proxy-agent@7.0.2: dependencies: - agent-base: 7.1.1 - debug: 4.3.7(supports-color@8.1.1) + agent-base: 7.1.3 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color - https-proxy-agent@7.0.5: + https-proxy-agent@7.0.6: dependencies: - agent-base: 7.1.1 - debug: 4.3.7(supports-color@8.1.1) + agent-base: 7.1.3 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -17861,22 +17923,22 @@ snapshots: through: 2.3.8 wrap-ansi: 6.2.0 - internal-slot@1.0.7: + internal-slot@1.1.0: dependencies: es-errors: 1.3.0 hasown: 2.0.2 - side-channel: 1.0.6 + side-channel: 1.1.0 internmap@1.0.1: {} internmap@2.0.3: {} - intl-messageformat@10.7.1: + intl-messageformat@10.7.10: dependencies: - '@formatjs/ecma402-abstract': 2.2.0 - '@formatjs/fast-memoize': 2.2.1 - '@formatjs/icu-messageformat-parser': 2.8.0 - tslib: 2.8.0 + '@formatjs/ecma402-abstract': 2.3.1 + '@formatjs/fast-memoize': 2.2.5 + '@formatjs/icu-messageformat-parser': 2.9.7 + tslib: 2.8.1 invariant@2.2.4: dependencies: @@ -17915,8 +17977,8 @@ snapshots: is-array-buffer@3.0.4: dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 + call-bind: 1.0.8 + get-intrinsic: 1.2.6 is-arrayish@0.2.1: {} @@ -17926,7 +17988,7 @@ snapshots: dependencies: has-tostringtag: 1.0.2 - is-bigint@1.0.4: + is-bigint@1.1.0: dependencies: has-bigints: 1.0.2 @@ -17934,31 +17996,34 @@ snapshots: dependencies: binary-extensions: 2.3.0 - is-boolean-object@1.1.2: + is-boolean-object@1.2.1: 
dependencies: - call-bind: 1.0.7 + call-bound: 1.0.2 has-tostringtag: 1.0.2 is-buffer@1.1.6: {} is-buffer@2.0.5: {} - is-bun-module@1.2.1: + is-bun-module@1.3.0: dependencies: semver: 7.6.3 is-callable@1.2.7: {} - is-core-module@2.15.1: + is-core-module@2.16.0: dependencies: hasown: 2.0.2 - is-data-view@1.0.1: + is-data-view@1.0.2: dependencies: + call-bound: 1.0.2 + get-intrinsic: 1.2.6 is-typed-array: 1.1.13 - is-date-object@1.0.5: + is-date-object@1.1.0: dependencies: + call-bound: 1.0.2 has-tostringtag: 1.0.2 is-decimal@1.0.4: {} @@ -17973,9 +18038,9 @@ snapshots: is-extglob@2.1.1: {} - is-finalizationregistry@1.0.2: + is-finalizationregistry@1.1.0: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 is-fullwidth-code-point@3.0.0: {} @@ -18000,14 +18065,15 @@ snapshots: is-lower-case@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 is-map@2.0.3: {} is-negative-zero@2.0.3: {} - is-number-object@1.0.7: + is-number-object@1.1.0: dependencies: + call-bind: 1.0.8 has-tostringtag: 1.0.2 is-number@7.0.0: {} @@ -18026,14 +18092,16 @@ snapshots: is-property@1.0.2: {} - is-reference@3.0.2: + is-reference@3.0.3: dependencies: '@types/estree': 1.0.6 - is-regex@1.1.4: + is-regex@1.2.1: dependencies: - call-bind: 1.0.7 + call-bound: 1.0.2 + gopd: 1.2.0 has-tostringtag: 1.0.2 + hasown: 2.0.2 is-relative@1.0.0: dependencies: @@ -18043,23 +18111,26 @@ snapshots: is-shared-array-buffer@1.0.3: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 is-stream@1.1.0: {} is-stream@3.0.0: {} - is-string@1.0.7: + is-string@1.1.0: dependencies: + call-bind: 1.0.8 has-tostringtag: 1.0.2 - is-symbol@1.0.4: + is-symbol@1.1.1: dependencies: - has-symbols: 1.0.3 + call-bound: 1.0.2 + has-symbols: 1.1.0 + safe-regex-test: 1.1.0 is-typed-array@1.1.13: dependencies: - which-typed-array: 1.1.15 + which-typed-array: 1.1.16 is-unc-path@1.0.0: dependencies: @@ -18069,18 +18140,18 @@ snapshots: is-upper-case@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 is-weakmap@2.0.2: {} - is-weakref@1.0.2: + 
is-weakref@1.1.0: dependencies: - call-bind: 1.0.7 + call-bound: 1.0.2 is-weakset@2.0.3: dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 + call-bind: 1.0.8 + get-intrinsic: 1.2.6 is-windows@1.0.2: {} @@ -18099,12 +18170,13 @@ snapshots: dependencies: ws: 8.18.0 - iterator.prototype@1.1.3: + iterator.prototype@1.1.4: dependencies: - define-properties: 1.2.1 - get-intrinsic: 1.2.4 - has-symbols: 1.0.3 - reflect.getprototypeof: 1.0.6 + define-data-property: 1.1.4 + es-object-atoms: 1.0.0 + get-intrinsic: 1.2.6 + has-symbols: 1.1.0 + reflect.getprototypeof: 1.0.8 set-function-name: 2.0.2 itty-time@1.0.6: {} @@ -18140,7 +18212,7 @@ snapshots: js-tokens@4.0.0: {} - js-tokens@9.0.0: {} + js-tokens@9.0.1: {} js-yaml@3.14.1: dependencies: @@ -18164,10 +18236,10 @@ snapshots: form-data: 4.0.1 html-encoding-sniffer: 4.0.0 http-proxy-agent: 7.0.2 - https-proxy-agent: 7.0.5 + https-proxy-agent: 7.0.6 is-potential-custom-element-name: 1.0.1 - nwsapi: 2.2.13 - parse5: 7.2.0 + nwsapi: 2.2.16 + parse5: 7.2.1 rrweb-cssom: 0.7.1 saxes: 6.0.0 symbol-tree: 3.2.4 @@ -18176,7 +18248,7 @@ snapshots: webidl-conversions: 7.0.0 whatwg-encoding: 3.1.1 whatwg-mimetype: 4.0.0 - whatwg-url: 14.0.0 + whatwg-url: 14.1.0 ws: 8.18.0 xml-name-validator: 5.0.0 transitivePeerDependencies: @@ -18184,7 +18256,7 @@ snapshots: - supports-color - utf-8-validate - jsesc@3.0.2: {} + jsesc@3.1.0: {} json-bigint-patch@0.0.8: {} @@ -18239,14 +18311,14 @@ snapshots: object.assign: 4.1.5 object.values: 1.2.0 - katex@0.16.11: + katex@0.16.15: dependencies: commander: 8.3.0 keccak@3.0.4: dependencies: node-addon-api: 2.0.2 - node-gyp-build: 4.8.2 + node-gyp-build: 4.8.4 readable-stream: 3.6.2 keyv@4.5.4: @@ -18295,9 +18367,7 @@ snapshots: dependencies: immediate: 3.0.6 - lilconfig@2.1.0: {} - - lilconfig@3.1.2: {} + lilconfig@3.1.3: {} linebreak@1.1.0: dependencies: @@ -18315,9 +18385,9 @@ snapshots: load-tsconfig@0.2.5: {} - local-pkg@0.5.0: + local-pkg@0.5.1: dependencies: - mlly: 1.7.2 + mlly: 1.7.3 
pkg-types: 1.2.1 localforage@1.10.0: @@ -18372,11 +18442,11 @@ snapshots: lower-case-first@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 lower-case@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 lru-cache@10.4.3: {} @@ -18395,7 +18465,7 @@ snapshots: dependencies: sourcemap-codec: 1.4.8 - magic-string@0.30.12: + magic-string@0.30.15: dependencies: '@jridgewell/sourcemap-codec': 1.5.0 @@ -18432,6 +18502,8 @@ snapshots: markdown-table@3.0.4: {} + math-intrinsics@1.0.0: {} + mcl-wasm@0.7.9: {} md5.js@1.3.5: @@ -18486,19 +18558,19 @@ snapshots: transitivePeerDependencies: - supports-color - mdast-util-from-markdown@2.0.1: + mdast-util-from-markdown@2.0.2: dependencies: '@types/mdast': 4.0.4 '@types/unist': 3.0.3 decode-named-character-reference: 1.0.2 devlop: 1.1.0 mdast-util-to-string: 4.0.0 - micromark: 4.0.0 - micromark-util-decode-numeric-character-reference: 2.0.1 - micromark-util-decode-string: 2.0.0 - micromark-util-normalize-identifier: 2.0.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark: 4.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-decode-string: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 unist-util-stringify-position: 4.0.0 transitivePeerDependencies: - supports-color @@ -18508,8 +18580,8 @@ snapshots: '@types/mdast': 4.0.4 devlop: 1.1.0 escape-string-regexp: 5.0.0 - mdast-util-from-markdown: 2.0.1 - mdast-util-to-markdown: 2.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 micromark-extension-frontmatter: 2.0.0 transitivePeerDependencies: - supports-color @@ -18583,7 +18655,7 @@ snapshots: ccount: 2.0.1 mdast-util-from-markdown: 1.3.1 mdast-util-to-markdown: 1.5.0 - parse-entities: 4.0.1 + parse-entities: 4.0.2 stringify-entities: 4.0.4 unist-util-remove-position: 4.0.2 unist-util-stringify-position: 3.0.3 @@ -18636,9 +18708,9 @@ snapshots: dependencies: '@types/hast': 3.0.4 '@types/mdast': 4.0.4 - 
'@ungap/structured-clone': 1.2.0 + '@ungap/structured-clone': 1.2.1 devlop: 1.1.0 - micromark-util-sanitize-uri: 2.0.0 + micromark-util-sanitize-uri: 2.0.1 trim-lines: 3.0.1 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 @@ -18655,14 +18727,15 @@ snapshots: unist-util-visit: 4.1.2 zwitch: 2.0.4 - mdast-util-to-markdown@2.1.0: + mdast-util-to-markdown@2.1.2: dependencies: '@types/mdast': 4.0.4 '@types/unist': 3.0.3 longest-streak: 3.1.0 mdast-util-phrasing: 4.1.0 mdast-util-to-string: 4.0.0 - micromark-util-decode-string: 2.0.0 + micromark-util-classify-character: 2.0.1 + micromark-util-decode-string: 2.0.1 unist-util-visit: 5.0.0 zwitch: 2.0.4 @@ -18702,16 +18775,16 @@ snapshots: dependencies: '@braintree/sanitize-url': 6.0.4 '@types/d3-scale': 4.0.8 - '@types/d3-scale-chromatic': 3.0.3 - cytoscape: 3.30.2 - cytoscape-cose-bilkent: 4.1.0(cytoscape@3.30.2) + '@types/d3-scale-chromatic': 3.1.0 + cytoscape: 3.30.4 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.30.4) d3: 7.9.0 d3-sankey: 0.12.3 dagre-d3-es: 7.0.10 dayjs: 1.11.13 dompurify: 3.1.6 elkjs: 0.9.3 - katex: 0.16.11 + katex: 0.16.15 khroma: 2.1.0 lodash-es: 4.17.21 mdast-util-from-markdown: 1.3.1 @@ -18723,9 +18796,9 @@ snapshots: transitivePeerDependencies: - supports-color - meros@1.3.0(@types/node@22.7.8): + meros@1.3.0(@types/node@22.10.2): optionalDependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 methods@1.1.2: {} @@ -18748,31 +18821,31 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 - micromark-core-commonmark@2.0.1: + micromark-core-commonmark@2.0.2: dependencies: decode-named-character-reference: 1.0.2 devlop: 1.1.0 - micromark-factory-destination: 2.0.0 - micromark-factory-label: 2.0.0 - micromark-factory-space: 2.0.0 - micromark-factory-title: 2.0.0 - micromark-factory-whitespace: 2.0.0 - micromark-util-character: 2.1.0 - micromark-util-chunked: 2.0.0 - micromark-util-classify-character: 2.0.0 - micromark-util-html-tag-name: 2.0.0 - micromark-util-normalize-identifier: 2.0.0 - 
micromark-util-resolve-all: 2.0.0 - micromark-util-subtokenize: 2.0.1 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-extension-frontmatter@2.0.0: dependencies: fault: 2.0.1 - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-extension-gfm-autolink-literal@1.0.5: dependencies: @@ -18835,7 +18908,7 @@ snapshots: micromark-extension-math@2.1.2: dependencies: '@types/katex': 0.16.7 - katex: 0.16.11 + katex: 0.16.15 micromark-factory-space: 1.1.0 micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 @@ -18884,8 +18957,8 @@ snapshots: micromark-extension-mdxjs@1.0.1: dependencies: - acorn: 8.13.0 - acorn-jsx: 5.3.2(acorn@8.13.0) + acorn: 8.14.0 + acorn-jsx: 5.3.2(acorn@8.14.0) micromark-extension-mdx-expression: 1.0.8 micromark-extension-mdx-jsx: 1.0.5 micromark-extension-mdx-md: 1.0.1 @@ -18899,11 +18972,11 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 - micromark-factory-destination@2.0.0: + micromark-factory-destination@2.0.1: dependencies: - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-factory-label@1.1.0: dependencies: @@ -18912,12 +18985,12 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 - 
micromark-factory-label@2.0.0: + micromark-factory-label@2.0.1: dependencies: devlop: 1.1.0 - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-factory-mdx-expression@1.0.9: dependencies: @@ -18935,10 +19008,10 @@ snapshots: micromark-util-character: 1.2.0 micromark-util-types: 1.1.0 - micromark-factory-space@2.0.0: + micromark-factory-space@2.0.1: dependencies: - micromark-util-character: 2.1.0 - micromark-util-types: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-types: 2.0.1 micromark-factory-title@1.1.0: dependencies: @@ -18947,12 +19020,12 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 - micromark-factory-title@2.0.0: + micromark-factory-title@2.0.1: dependencies: - micromark-factory-space: 2.0.0 - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-factory-whitespace@1.1.0: dependencies: @@ -18961,30 +19034,30 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 - micromark-factory-whitespace@2.0.0: + micromark-factory-whitespace@2.0.1: dependencies: - micromark-factory-space: 2.0.0 - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-util-character@1.2.0: dependencies: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 - micromark-util-character@2.1.0: + micromark-util-character@2.1.1: dependencies: - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-util-chunked@1.1.0: dependencies: micromark-util-symbol: 1.1.0 - 
micromark-util-chunked@2.0.0: + micromark-util-chunked@2.0.1: dependencies: - micromark-util-symbol: 2.0.0 + micromark-util-symbol: 2.0.1 micromark-util-classify-character@1.1.0: dependencies: @@ -18992,29 +19065,29 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 - micromark-util-classify-character@2.0.0: + micromark-util-classify-character@2.0.1: dependencies: - micromark-util-character: 2.1.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-util-combine-extensions@1.1.0: dependencies: micromark-util-chunked: 1.1.0 micromark-util-types: 1.1.0 - micromark-util-combine-extensions@2.0.0: + micromark-util-combine-extensions@2.0.1: dependencies: - micromark-util-chunked: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.1 micromark-util-decode-numeric-character-reference@1.1.0: dependencies: micromark-util-symbol: 1.1.0 - micromark-util-decode-numeric-character-reference@2.0.1: + micromark-util-decode-numeric-character-reference@2.0.2: dependencies: - micromark-util-symbol: 2.0.0 + micromark-util-symbol: 2.0.1 micromark-util-decode-string@1.1.0: dependencies: @@ -19023,16 +19096,16 @@ snapshots: micromark-util-decode-numeric-character-reference: 1.1.0 micromark-util-symbol: 1.1.0 - micromark-util-decode-string@2.0.0: + micromark-util-decode-string@2.0.1: dependencies: decode-named-character-reference: 1.0.2 - micromark-util-character: 2.1.0 - micromark-util-decode-numeric-character-reference: 2.0.1 - micromark-util-symbol: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-symbol: 2.0.1 micromark-util-encode@1.1.0: {} - micromark-util-encode@2.0.0: {} + micromark-util-encode@2.0.1: {} micromark-util-events-to-acorn@1.2.3: dependencies: @@ -19047,23 +19120,23 @@ snapshots: micromark-util-html-tag-name@1.2.0: {} - 
micromark-util-html-tag-name@2.0.0: {} + micromark-util-html-tag-name@2.0.1: {} micromark-util-normalize-identifier@1.1.0: dependencies: micromark-util-symbol: 1.1.0 - micromark-util-normalize-identifier@2.0.0: + micromark-util-normalize-identifier@2.0.1: dependencies: - micromark-util-symbol: 2.0.0 + micromark-util-symbol: 2.0.1 micromark-util-resolve-all@1.1.0: dependencies: micromark-util-types: 1.1.0 - micromark-util-resolve-all@2.0.0: + micromark-util-resolve-all@2.0.1: dependencies: - micromark-util-types: 2.0.0 + micromark-util-types: 2.0.1 micromark-util-sanitize-uri@1.2.0: dependencies: @@ -19071,11 +19144,11 @@ snapshots: micromark-util-encode: 1.1.0 micromark-util-symbol: 1.1.0 - micromark-util-sanitize-uri@2.0.0: + micromark-util-sanitize-uri@2.0.1: dependencies: - micromark-util-character: 2.1.0 - micromark-util-encode: 2.0.0 - micromark-util-symbol: 2.0.0 + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 micromark-util-subtokenize@1.1.0: dependencies: @@ -19084,24 +19157,24 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 - micromark-util-subtokenize@2.0.1: + micromark-util-subtokenize@2.0.3: dependencies: devlop: 1.1.0 - micromark-util-chunked: 2.0.0 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 micromark-util-symbol@1.1.0: {} - micromark-util-symbol@2.0.0: {} + micromark-util-symbol@2.0.1: {} micromark-util-types@1.1.0: {} - micromark-util-types@2.0.0: {} + micromark-util-types@2.0.1: {} micromark@2.11.4: dependencies: - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) parse-entities: 2.0.0 transitivePeerDependencies: - supports-color @@ -19109,7 +19182,7 @@ snapshots: micromark@3.2.0: dependencies: '@types/debug': 4.1.12 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) decode-named-character-reference: 1.0.2 micromark-core-commonmark: 1.1.0 
micromark-factory-space: 1.1.0 @@ -19128,25 +19201,25 @@ snapshots: transitivePeerDependencies: - supports-color - micromark@4.0.0: + micromark@4.0.1: dependencies: '@types/debug': 4.1.12 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) decode-named-character-reference: 1.0.2 devlop: 1.1.0 - micromark-core-commonmark: 2.0.1 - micromark-factory-space: 2.0.0 - micromark-util-character: 2.1.0 - micromark-util-chunked: 2.0.0 - micromark-util-combine-extensions: 2.0.0 - micromark-util-decode-numeric-character-reference: 2.0.1 - micromark-util-encode: 2.0.0 - micromark-util-normalize-identifier: 2.0.0 - micromark-util-resolve-all: 2.0.0 - micromark-util-sanitize-uri: 2.0.0 - micromark-util-subtokenize: 2.0.1 - micromark-util-symbol: 2.0.0 - micromark-util-types: 2.0.0 + micromark-core-commonmark: 2.0.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-subtokenize: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 transitivePeerDependencies: - supports-color @@ -19169,20 +19242,20 @@ snapshots: mimic-fn@4.0.0: {} - miniflare@3.20241106.0: + miniflare@3.20241205.0: dependencies: '@cspotcode/source-map-support': 0.8.1 - acorn: 8.13.0 + acorn: 8.14.0 acorn-walk: 8.3.4 capnp-ts: 0.7.0 exit-hook: 2.2.1 glob-to-regexp: 0.4.1 stoppable: 1.1.0 undici: 5.28.4 - workerd: 1.20241106.1 + workerd: 1.20241205.0 ws: 8.18.0 youch: 3.3.4 - zod: 3.23.8 + zod: 3.24.1 transitivePeerDependencies: - bufferutil - supports-color @@ -19266,9 +19339,9 @@ snapshots: mkdirp@3.0.1: {} - mlly@1.7.2: + mlly@1.7.3: dependencies: - acorn: 8.13.0 + acorn: 8.14.0 pathe: 1.1.2 pkg-types: 1.2.1 ufo: 1.5.4 @@ -19277,12 +19350,12 @@ snapshots: dependencies: 
obliterator: 2.0.4 - mocha@10.7.3: + mocha@10.8.2: dependencies: ansi-colors: 4.1.3 browser-stdout: 1.3.1 chokidar: 3.6.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) diff: 5.2.0 escape-string-regexp: 4.0.0 find-up: 5.0.0 @@ -19318,6 +19391,19 @@ snapshots: transitivePeerDependencies: - supports-color + motion-dom@11.14.3: {} + + motion-utils@11.14.3: {} + + motion@11.14.4(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + dependencies: + framer-motion: 11.14.4(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + tslib: 2.8.1 + optionalDependencies: + '@emotion/is-prop-valid': 0.8.8 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + mri@1.2.0: {} ms@2.0.0: {} @@ -19350,7 +19436,7 @@ snapshots: stacktrace-js: 2.0.2 stylis: 4.3.4 - nanoid@3.3.7: {} + nanoid@3.3.8: {} napi-macros@2.2.2: {} @@ -19372,46 +19458,46 @@ snapshots: transitivePeerDependencies: - supports-color - next-seo@6.6.0(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next-seo@6.6.0(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - next: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - next-sitemap@4.2.3(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): + next-sitemap@4.2.3(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)): dependencies: '@corex/deepmerge': 4.0.43 '@next/env': 13.5.7 fast-glob: 3.3.2 minimist: 1.2.8 - next: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - 
next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@next/env': 14.2.18 + '@next/env': 14.2.20 '@swc/helpers': 0.5.5 busboy: 1.6.0 - caniuse-lite: 1.0.30001669 + caniuse-lite: 1.0.30001688 graceful-fs: 4.2.11 postcss: 8.4.31 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - styled-jsx: 5.1.1(@babel/core@7.25.9)(react@18.3.1) + styled-jsx: 5.1.1(@babel/core@7.26.0)(react@18.3.1) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.18 - '@next/swc-darwin-x64': 14.2.18 - '@next/swc-linux-arm64-gnu': 14.2.18 - '@next/swc-linux-arm64-musl': 14.2.18 - '@next/swc-linux-x64-gnu': 14.2.18 - '@next/swc-linux-x64-musl': 14.2.18 - '@next/swc-win32-arm64-msvc': 14.2.18 - '@next/swc-win32-ia32-msvc': 14.2.18 - '@next/swc-win32-x64-msvc': 14.2.18 + '@next/swc-darwin-arm64': 14.2.20 + '@next/swc-darwin-x64': 14.2.20 + '@next/swc-linux-arm64-gnu': 14.2.20 + '@next/swc-linux-arm64-musl': 14.2.20 + '@next/swc-linux-x64-gnu': 14.2.20 + '@next/swc-linux-x64-musl': 14.2.20 + '@next/swc-win32-arm64-msvc': 14.2.20 + '@next/swc-win32-ia32-msvc': 14.2.20 + '@next/swc-win32-x64-msvc': 14.2.20 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros - nextra@2.13.4(next@14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + nextra@2.13.4(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@headlessui/react': 1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mdx-js/mdx': 2.3.0 @@ -19423,9 +19509,9 @@ snapshots: github-slugger: 2.0.0 graceful-fs: 4.2.11 gray-matter: 4.0.3 - katex: 0.16.11 + katex: 0.16.15 lodash.get: 4.4.2 - next: 14.2.18(@babel/core@7.25.9)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-mdx-remote: 
4.4.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) p-limit: 3.1.0 react: 18.3.1 @@ -19441,7 +19527,7 @@ snapshots: title: 3.5.3 unist-util-remove: 4.0.0 unist-util-visit: 5.0.0 - zod: 3.23.8 + zod: 3.24.1 transitivePeerDependencies: - supports-color @@ -19450,7 +19536,7 @@ snapshots: no-case@3.0.4: dependencies: lower-case: 2.0.2 - tslib: 2.8.0 + tslib: 2.8.1 node-addon-api@2.0.2: {} @@ -19464,7 +19550,7 @@ snapshots: node-forge@1.3.1: {} - node-gyp-build@4.8.2: {} + node-gyp-build@4.8.4: {} node-gyp@10.0.1: dependencies: @@ -19498,7 +19584,7 @@ snapshots: - supports-color optional: true - node-releases@2.0.18: {} + node-releases@2.0.19: {} non-layered-tidy-tree-layout@2.0.2: {} @@ -19551,7 +19637,7 @@ snapshots: numeral@2.0.6: {} - nwsapi@2.2.13: {} + nwsapi@2.2.16: {} object-assign@4.1.1: {} @@ -19561,37 +19647,39 @@ snapshots: object-inspect@1.13.2: {} + object-inspect@1.13.3: {} + object-keys@1.1.1: {} object.assign@4.1.5: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - has-symbols: 1.0.3 + has-symbols: 1.1.0 object-keys: 1.1.1 object.entries@1.1.8: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 es-object-atoms: 1.0.0 object.fromentries@2.0.8: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-object-atoms: 1.0.0 object.groupby@1.0.3: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 object.values@1.2.0: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 es-object-atoms: 1.0.0 @@ -19623,23 +19711,17 @@ snapshots: dependencies: mimic-fn: 4.0.0 - oniguruma-to-js@0.4.3: + oniguruma-to-es@0.7.0: dependencies: - regex: 4.3.3 + emoji-regex-xs: 1.0.0 + regex: 5.0.2 + regex-recursion: 4.3.0 open@7.4.2: dependencies: is-docker: 2.2.1 is-wsl: 2.2.0 - optimism@0.18.0: - dependencies: - '@wry/caches': 1.0.1 - '@wry/context': 0.7.4 - '@wry/trie': 
0.4.3 - tslib: 2.8.0 - optional: true - optionator@0.9.4: dependencies: deep-is: 0.1.4 @@ -19710,7 +19792,7 @@ snapshots: param-case@3.0.4: dependencies: dot-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 parent-module@1.0.1: dependencies: @@ -19730,10 +19812,9 @@ snapshots: is-decimal: 1.0.4 is-hexadecimal: 1.0.4 - parse-entities@4.0.1: + parse-entities@4.0.2: dependencies: '@types/unist': 2.0.11 - character-entities: 2.0.2 character-entities-legacy: 3.0.0 character-reference-invalid: 2.0.1 decode-named-character-reference: 1.0.2 @@ -19749,21 +19830,21 @@ snapshots: parse-json@5.2.0: dependencies: - '@babel/code-frame': 7.25.9 + '@babel/code-frame': 7.26.2 error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 parse-json@6.0.2: dependencies: - '@babel/code-frame': 7.25.9 + '@babel/code-frame': 7.26.2 error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 2.0.4 parse-numeric-range@1.3.0: {} - parse5@7.2.0: + parse5@7.2.1: dependencies: entities: 4.5.0 @@ -19772,14 +19853,14 @@ snapshots: pascal-case@3.1.2: dependencies: no-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 path-browserify@1.0.1: {} path-case@3.0.4: dependencies: dot-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 path-exists@3.0.0: {} @@ -19828,7 +19909,7 @@ snapshots: dependencies: '@types/estree': 1.0.6 estree-walker: 3.0.3 - is-reference: 3.0.2 + is-reference: 3.0.3 pg-cloudflare@1.1.1: optional: true @@ -19912,7 +19993,7 @@ snapshots: pkg-types@1.2.1: dependencies: confbox: 0.1.8 - mlly: 1.7.2 + mlly: 1.7.3 pathe: 1.1.2 pluralize@8.0.0: {} @@ -19928,29 +20009,29 @@ snapshots: postcss: 8.4.49 postcss-value-parser: 4.2.0 read-cache: 1.0.0 - resolve: 1.22.8 + resolve: 1.22.9 postcss-js@4.0.1(postcss@8.4.49): dependencies: camelcase-css: 2.0.1 postcss: 8.4.49 - postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)): + postcss-load-config@4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)): dependencies: - 
lilconfig: 3.1.2 - yaml: 2.6.0 + lilconfig: 3.1.3 + yaml: 2.6.1 optionalDependencies: postcss: 8.4.49 - ts-node: 10.9.2(@types/node@22.7.8)(typescript@5.6.3) + ts-node: 10.9.2(@types/node@22.10.2)(typescript@5.7.2) - postcss-load-config@6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.0): + postcss-load-config@6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1): dependencies: - lilconfig: 3.1.2 + lilconfig: 3.1.3 optionalDependencies: jiti: 1.21.6 postcss: 8.4.49 tsx: 4.19.2 - yaml: 2.6.0 + yaml: 2.6.1 postcss-nested@6.2.0(postcss@8.4.49): dependencies: @@ -19966,13 +20047,13 @@ snapshots: postcss@8.4.31: dependencies: - nanoid: 3.3.7 + nanoid: 3.3.8 picocolors: 1.1.1 source-map-js: 1.2.1 postcss@8.4.49: dependencies: - nanoid: 3.3.7 + nanoid: 3.3.8 picocolors: 1.1.1 source-map-js: 1.2.1 @@ -19988,13 +20069,13 @@ snapshots: prelude-ls@1.2.1: {} - prettier-plugin-tailwindcss@0.6.9(prettier@3.3.3): + prettier-plugin-tailwindcss@0.6.9(prettier@3.4.2): dependencies: - prettier: 3.3.3 + prettier: 3.4.2 prettier@2.8.8: {} - prettier@3.3.3: {} + prettier@3.4.2: {} pretty-format@29.7.0: dependencies: @@ -20004,9 +20085,9 @@ snapshots: printable-characters@1.0.42: {} - prism-react-renderer@2.4.0(react@18.3.1): + prism-react-renderer@2.4.1(react@18.3.1): dependencies: - '@types/prismjs': 1.26.4 + '@types/prismjs': 1.26.5 clsx: 2.1.1 react: 18.3.1 @@ -20053,11 +20134,11 @@ snapshots: forwarded: 0.2.0 ipaddr.js: 1.9.1 - proxy-from-env@1.1.0: {} - pseudomap@1.0.2: {} - psl@1.9.0: {} + psl@1.15.0: + dependencies: + punycode: 2.3.1 pump@3.0.2: dependencies: @@ -20074,11 +20155,11 @@ snapshots: qs@6.11.0: dependencies: - side-channel: 1.0.6 + side-channel: 1.1.0 - qs@6.13.0: + qs@6.13.1: dependencies: - side-channel: 1.0.6 + side-channel: 1.1.0 querystringify@2.2.0: {} @@ -20108,46 +20189,47 @@ snapshots: iconv-lite: 0.4.24 unpipe: 1.0.0 - react-aria@3.35.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): - dependencies: - '@internationalized/string': 3.2.4 - 
'@react-aria/breadcrumbs': 3.5.18(react@18.3.1) - '@react-aria/button': 3.10.1(react@18.3.1) - '@react-aria/calendar': 3.5.13(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/checkbox': 3.14.8(react@18.3.1) - '@react-aria/color': 3.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/combobox': 3.10.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/datepicker': 3.11.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/dialog': 3.5.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/dnd': 3.7.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/focus': 3.18.4(react@18.3.1) - '@react-aria/gridlist': 3.9.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/i18n': 3.12.3(react@18.3.1) - '@react-aria/interactions': 3.22.4(react@18.3.1) - '@react-aria/label': 3.7.12(react@18.3.1) - '@react-aria/link': 3.7.6(react@18.3.1) - '@react-aria/listbox': 3.13.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/menu': 3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/meter': 3.4.17(react@18.3.1) - '@react-aria/numberfield': 3.11.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/overlays': 3.23.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/progress': 3.4.17(react@18.3.1) - '@react-aria/radio': 3.10.9(react@18.3.1) - '@react-aria/searchfield': 3.7.10(react@18.3.1) - '@react-aria/select': 3.14.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/selection': 3.20.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/separator': 3.4.3(react@18.3.1) - '@react-aria/slider': 3.7.13(react@18.3.1) - '@react-aria/ssr': 3.9.6(react@18.3.1) - '@react-aria/switch': 3.6.9(react@18.3.1) - '@react-aria/table': 3.15.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/tabs': 3.9.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/tag': 3.4.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@react-aria/textfield': 
3.14.10(react@18.3.1) - '@react-aria/tooltip': 3.7.9(react@18.3.1) - '@react-aria/utils': 3.25.3(react@18.3.1) - '@react-aria/visually-hidden': 3.8.17(react@18.3.1) - '@react-types/shared': 3.25.0(react@18.3.1) + react-aria@3.36.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + dependencies: + '@internationalized/string': 3.2.5 + '@react-aria/breadcrumbs': 3.5.19(react@18.3.1) + '@react-aria/button': 3.11.0(react@18.3.1) + '@react-aria/calendar': 3.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/checkbox': 3.15.0(react@18.3.1) + '@react-aria/color': 3.0.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/combobox': 3.11.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/datepicker': 3.12.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/dialog': 3.5.20(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/disclosure': 3.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/dnd': 3.8.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/focus': 3.19.0(react@18.3.1) + '@react-aria/gridlist': 3.10.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/i18n': 3.12.4(react@18.3.1) + '@react-aria/interactions': 3.22.5(react@18.3.1) + '@react-aria/label': 3.7.13(react@18.3.1) + '@react-aria/link': 3.7.7(react@18.3.1) + '@react-aria/listbox': 3.13.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/menu': 3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/meter': 3.4.18(react@18.3.1) + '@react-aria/numberfield': 3.11.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/overlays': 3.24.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/progress': 3.4.18(react@18.3.1) + '@react-aria/radio': 3.10.10(react@18.3.1) + '@react-aria/searchfield': 3.7.11(react@18.3.1) + '@react-aria/select': 3.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/selection': 3.21.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + 
'@react-aria/separator': 3.4.4(react@18.3.1) + '@react-aria/slider': 3.7.14(react@18.3.1) + '@react-aria/ssr': 3.9.7(react@18.3.1) + '@react-aria/switch': 3.6.10(react@18.3.1) + '@react-aria/table': 3.16.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/tabs': 3.9.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/tag': 3.4.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@react-aria/textfield': 3.15.0(react@18.3.1) + '@react-aria/tooltip': 3.7.10(react@18.3.1) + '@react-aria/utils': 3.26.0(react@18.3.1) + '@react-aria/visually-hidden': 3.8.18(react@18.3.1) + '@react-types/shared': 3.26.0(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -20165,7 +20247,7 @@ snapshots: react-dropzone@14.2.3(react@18.3.1): dependencies: - attr-accept: 2.2.4 + attr-accept: 2.2.5 file-selector: 0.6.0 prop-types: 15.8.1 react: 18.3.1 @@ -20187,26 +20269,26 @@ snapshots: react: 18.3.1 react-is: 18.3.1 - react-remove-scroll-bar@2.3.6(@types/react@18.3.12)(react@18.3.1): + react-remove-scroll-bar@2.3.6(@types/react@18.3.16)(react@18.3.1): dependencies: react: 18.3.1 - react-style-singleton: 2.2.1(@types/react@18.3.12)(react@18.3.1) - tslib: 2.8.0 + react-style-singleton: 2.2.1(@types/react@18.3.16)(react@18.3.1) + tslib: 2.8.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - react-remove-scroll@2.6.0(@types/react@18.3.12)(react@18.3.1): + react-remove-scroll@2.6.0(@types/react@18.3.16)(react@18.3.1): dependencies: react: 18.3.1 - react-remove-scroll-bar: 2.3.6(@types/react@18.3.12)(react@18.3.1) - react-style-singleton: 2.2.1(@types/react@18.3.12)(react@18.3.1) - tslib: 2.8.0 - use-callback-ref: 1.3.2(@types/react@18.3.12)(react@18.3.1) - use-sidecar: 1.1.2(@types/react@18.3.12)(react@18.3.1) + react-remove-scroll-bar: 2.3.6(@types/react@18.3.16)(react@18.3.1) + react-style-singleton: 2.2.1(@types/react@18.3.16)(react@18.3.1) + tslib: 2.8.1 + use-callback-ref: 1.3.2(@types/react@18.3.16)(react@18.3.1) + use-sidecar: 
1.1.2(@types/react@18.3.16)(react@18.3.1) optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - react-smooth@4.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + react-smooth@4.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: fast-equals: 5.0.1 prop-types: 15.8.1 @@ -20214,30 +20296,30 @@ snapshots: react-dom: 18.3.1(react@18.3.1) react-transition-group: 4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - react-style-singleton@2.2.1(@types/react@18.3.12)(react@18.3.1): + react-style-singleton@2.2.1(@types/react@18.3.16)(react@18.3.1): dependencies: get-nonce: 1.0.1 invariant: 2.2.4 react: 18.3.1 - tslib: 2.8.0 + tslib: 2.8.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 react-transition-group@4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-universal-interface@0.6.2(react@18.3.1)(tslib@2.8.0): + react-universal-interface@0.6.2(react@18.3.1)(tslib@2.8.1): dependencies: react: 18.3.1 - tslib: 2.8.0 + tslib: 2.8.1 - react-use@17.5.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + react-use@17.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: '@types/js-cookie': 2.2.7 '@xobotyi/scrollbar-width': 1.9.5 @@ -20248,15 +20330,15 @@ snapshots: nano-css: 5.6.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-universal-interface: 0.6.2(react@18.3.1)(tslib@2.8.0) + react-universal-interface: 0.6.2(react@18.3.1)(tslib@2.8.1) resize-observer-polyfill: 1.5.1 screenfull: 5.2.0 set-harmonic-interval: 1.0.1 throttle-debounce: 3.0.1 ts-easing: 0.2.0 - tslib: 2.8.0 + tslib: 2.8.1 - react-virtuoso@4.12.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + react-virtuoso@4.12.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: react: 18.3.1 react-dom: 
18.3.1(react@18.3.1) @@ -20296,13 +20378,13 @@ snapshots: esprima: 4.0.1 source-map: 0.6.1 tiny-invariant: 1.3.3 - tslib: 2.8.0 + tslib: 2.8.1 recharts-scale@0.4.5: dependencies: decimal.js-light: 2.5.1 - recharts@2.13.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + recharts@2.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: clsx: 2.1.1 eventemitter3: 4.0.7 @@ -20310,45 +20392,48 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) react-is: 18.3.1 - react-smooth: 4.0.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react-smooth: 4.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) recharts-scale: 0.4.5 tiny-invariant: 1.3.3 victory-vendor: 36.9.2 - reflect.getprototypeof@1.0.6: + reflect.getprototypeof@1.0.8: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + dunder-proto: 1.0.0 + es-abstract: 1.23.5 es-errors: 1.3.0 - get-intrinsic: 1.2.4 - globalthis: 1.0.4 - which-builtin-type: 1.1.4 + get-intrinsic: 1.2.6 + gopd: 1.2.0 + which-builtin-type: 1.2.1 regenerator-runtime@0.14.1: {} - regex@4.3.3: {} + regex-recursion@4.3.0: + dependencies: + regex-utilities: 2.3.0 + + regex-utilities@2.3.0: {} + + regex@5.0.2: + dependencies: + regex-utilities: 2.3.0 regexp.prototype.flags@1.5.3: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 es-errors: 1.3.0 set-function-name: 2.0.2 - rehackt@0.1.0(@types/react@18.3.12)(react@18.3.1): - optionalDependencies: - '@types/react': 18.3.12 - react: 18.3.1 - optional: true - rehype-katex@7.0.1: dependencies: '@types/hast': 3.0.4 '@types/katex': 0.16.7 hast-util-from-html-isomorphic: 2.0.0 hast-util-to-text: 4.0.2 - katex: 0.16.11 + katex: 0.16.15 unist-util-visit-parents: 6.0.1 vfile: 6.0.3 @@ -20362,12 +20447,12 @@ snapshots: rehype-raw@7.0.0: dependencies: '@types/hast': 3.0.4 - hast-util-raw: 9.0.4 + hast-util-raw: 9.1.0 vfile: 6.0.3 relay-runtime@12.0.0(encoding@0.1.13): dependencies: - '@babel/runtime': 7.25.9 + 
'@babel/runtime': 7.26.0 fbjs: 3.0.5(encoding@0.1.13) invariant: 2.2.4 transitivePeerDependencies: @@ -20476,27 +20561,22 @@ snapshots: resolve-pkg-maps@1.0.0: {} - resolve.exports@2.0.2: {} - resolve@1.17.0: dependencies: path-parse: 1.0.7 - resolve@1.22.8: + resolve@1.22.9: dependencies: - is-core-module: 2.15.1 + is-core-module: 2.16.0 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 resolve@2.0.0-next.5: dependencies: - is-core-module: 2.15.1 + is-core-module: 2.16.0 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 - response-iterator@0.2.6: - optional: true - restore-cursor@3.1.0: dependencies: onetime: 5.1.2 @@ -20553,52 +20633,55 @@ snapshots: dependencies: estree-walker: 0.6.1 - rollup@4.24.0: + rollup@4.28.1: dependencies: '@types/estree': 1.0.6 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.24.0 - '@rollup/rollup-android-arm64': 4.24.0 - '@rollup/rollup-darwin-arm64': 4.24.0 - '@rollup/rollup-darwin-x64': 4.24.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.24.0 - '@rollup/rollup-linux-arm-musleabihf': 4.24.0 - '@rollup/rollup-linux-arm64-gnu': 4.24.0 - '@rollup/rollup-linux-arm64-musl': 4.24.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.24.0 - '@rollup/rollup-linux-riscv64-gnu': 4.24.0 - '@rollup/rollup-linux-s390x-gnu': 4.24.0 - '@rollup/rollup-linux-x64-gnu': 4.24.0 - '@rollup/rollup-linux-x64-musl': 4.24.0 - '@rollup/rollup-win32-arm64-msvc': 4.24.0 - '@rollup/rollup-win32-ia32-msvc': 4.24.0 - '@rollup/rollup-win32-x64-msvc': 4.24.0 + '@rollup/rollup-android-arm-eabi': 4.28.1 + '@rollup/rollup-android-arm64': 4.28.1 + '@rollup/rollup-darwin-arm64': 4.28.1 + '@rollup/rollup-darwin-x64': 4.28.1 + '@rollup/rollup-freebsd-arm64': 4.28.1 + '@rollup/rollup-freebsd-x64': 4.28.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.28.1 + '@rollup/rollup-linux-arm-musleabihf': 4.28.1 + '@rollup/rollup-linux-arm64-gnu': 4.28.1 + '@rollup/rollup-linux-arm64-musl': 4.28.1 + '@rollup/rollup-linux-loongarch64-gnu': 4.28.1 + 
'@rollup/rollup-linux-powerpc64le-gnu': 4.28.1 + '@rollup/rollup-linux-riscv64-gnu': 4.28.1 + '@rollup/rollup-linux-s390x-gnu': 4.28.1 + '@rollup/rollup-linux-x64-gnu': 4.28.1 + '@rollup/rollup-linux-x64-musl': 4.28.1 + '@rollup/rollup-win32-arm64-msvc': 4.28.1 + '@rollup/rollup-win32-ia32-msvc': 4.28.1 + '@rollup/rollup-win32-x64-msvc': 4.28.1 fsevents: 2.3.3 - rrdom@2.0.0-alpha.17: + rrdom@2.0.0-alpha.18: dependencies: - rrweb-snapshot: 2.0.0-alpha.17 + rrweb-snapshot: 2.0.0-alpha.18 rrweb-cssom@0.7.1: {} - rrweb-snapshot@2.0.0-alpha.17: + rrweb-snapshot@2.0.0-alpha.18: dependencies: postcss: 8.4.49 rrweb@2.0.0-alpha.13: dependencies: - '@rrweb/types': 2.0.0-alpha.17 + '@rrweb/types': 2.0.0-alpha.18 '@types/css-font-loading-module': 0.0.7 '@xstate/fsm': 1.6.5 base64-arraybuffer: 1.0.2 fflate: 0.4.8 mitt: 3.0.1 - rrdom: 2.0.0-alpha.17 - rrweb-snapshot: 2.0.0-alpha.17 + rrdom: 2.0.0-alpha.18 + rrweb-snapshot: 2.0.0-alpha.18 rtl-css-js@1.16.1: dependencies: - '@babel/runtime': 7.25.9 + '@babel/runtime': 7.26.0 run-async@2.4.1: {} @@ -20616,28 +20699,29 @@ snapshots: rxjs@7.8.1: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 sade@1.8.1: dependencies: mri: 1.2.0 - safe-array-concat@1.1.2: + safe-array-concat@1.1.3: dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 - has-symbols: 1.0.3 + call-bind: 1.0.8 + call-bound: 1.0.2 + get-intrinsic: 1.2.6 + has-symbols: 1.1.0 isarray: 2.0.5 safe-buffer@5.1.2: {} safe-buffer@5.2.1: {} - safe-regex-test@1.0.3: + safe-regex-test@1.1.0: dependencies: - call-bind: 1.0.7 + call-bound: 1.0.2 es-errors: 1.3.0 - is-regex: 1.1.4 + is-regex: 1.2.1 safe-stable-stringify@2.5.0: {} @@ -20668,13 +20752,13 @@ snapshots: scrypt-js@3.0.1: {} - search-insights@2.17.2: {} + search-insights@2.17.3: {} secp256k1@4.0.4: dependencies: - elliptic: 6.5.7 + elliptic: 6.6.1 node-addon-api: 5.1.0 - node-gyp-build: 4.8.2 + node-gyp-build: 4.8.4 section-matter@1.0.0: dependencies: @@ -20713,7 +20797,7 @@ snapshots: sentence-case@3.0.4: dependencies: 
no-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 upper-case-first: 2.0.2 sequelize-pool@7.1.0: {} @@ -20722,7 +20806,7 @@ snapshots: dependencies: '@types/debug': 4.1.12 '@types/validator': 13.12.2 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) dottie: 2.0.6 inflection: 1.13.4 lodash: 4.17.21 @@ -20762,8 +20846,8 @@ snapshots: define-data-property: 1.1.4 es-errors: 1.3.0 function-bind: 1.1.2 - get-intrinsic: 1.2.4 - gopd: 1.0.1 + get-intrinsic: 1.2.6 + gopd: 1.2.0 has-property-descriptors: 1.0.2 set-function-name@2.0.2: @@ -20803,21 +20887,42 @@ snapshots: vscode-oniguruma: 1.7.0 vscode-textmate: 8.0.0 - shiki@1.22.0: + shiki@1.24.2: dependencies: - '@shikijs/core': 1.22.0 - '@shikijs/engine-javascript': 1.22.0 - '@shikijs/engine-oniguruma': 1.22.0 - '@shikijs/types': 1.22.0 - '@shikijs/vscode-textmate': 9.3.0 + '@shikijs/core': 1.24.2 + '@shikijs/engine-javascript': 1.24.2 + '@shikijs/engine-oniguruma': 1.24.2 + '@shikijs/types': 1.24.2 + '@shikijs/vscode-textmate': 9.3.1 '@types/hast': 3.0.4 - side-channel@1.0.6: + side-channel-list@1.0.0: dependencies: - call-bind: 1.0.7 es-errors: 1.3.0 - get-intrinsic: 1.2.4 - object-inspect: 1.13.2 + object-inspect: 1.13.3 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.2 + es-errors: 1.3.0 + get-intrinsic: 1.2.6 + object-inspect: 1.13.3 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.2 + es-errors: 1.3.0 + get-intrinsic: 1.2.6 + object-inspect: 1.13.3 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.3 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 siginfo@2.0.0: {} @@ -20843,12 +20948,12 @@ snapshots: snake-case@3.0.4: dependencies: dot-case: 3.0.4 - tslib: 2.8.0 + tslib: 2.8.1 - socks-proxy-agent@8.0.4: + socks-proxy-agent@8.0.5: dependencies: - agent-base: 7.1.1 - debug: 4.3.7(supports-color@8.1.1) + agent-base: 7.1.3 + debug: 4.4.0(supports-color@8.1.1) socks: 2.8.3 
transitivePeerDependencies: - supports-color @@ -20860,11 +20965,11 @@ snapshots: smart-buffer: 4.2.0 optional: true - solc@0.7.3(debug@4.3.7): + solc@0.7.3(debug@4.4.0): dependencies: command-exists: 1.2.9 commander: 3.0.2 - follow-redirects: 1.15.9(debug@4.3.7) + follow-redirects: 1.15.9(debug@4.4.0) fs-extra: 0.30.0 js-sha3: 0.8.0 memorystream: 0.3.1 @@ -20913,7 +21018,7 @@ snapshots: sponge-case@1.0.1: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 sprintf-js@1.0.3: {} @@ -20927,6 +21032,8 @@ snapshots: minipass: 7.1.2 optional: true + stable-hash@0.0.4: {} + stack-generator@2.0.10: dependencies: stackframe: 1.3.4 @@ -20957,7 +21064,7 @@ snapshots: statuses@2.0.1: {} - std-env@3.7.0: {} + std-env@3.8.0: {} stoppable@1.1.0: {} @@ -20983,46 +21090,50 @@ snapshots: string.prototype.includes@2.0.1: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 string.prototype.matchall@4.0.11: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 es-object-atoms: 1.0.0 - get-intrinsic: 1.2.4 - gopd: 1.0.1 - has-symbols: 1.0.3 - internal-slot: 1.0.7 + get-intrinsic: 1.2.6 + gopd: 1.2.0 + has-symbols: 1.1.0 + internal-slot: 1.1.0 regexp.prototype.flags: 1.5.3 set-function-name: 2.0.2 - side-channel: 1.0.6 + side-channel: 1.1.0 string.prototype.repeat@1.0.0: dependencies: define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 - string.prototype.trim@1.2.9: + string.prototype.trim@1.2.10: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 + call-bound: 1.0.2 + define-data-property: 1.1.4 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-object-atoms: 1.0.0 + has-property-descriptors: 1.0.2 - string.prototype.trimend@1.0.8: + string.prototype.trimend@1.0.9: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 + call-bound: 1.0.2 define-properties: 1.2.1 es-object-atoms: 1.0.0 
string.prototype.trimstart@1.0.8: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 define-properties: 1.2.1 es-object-atoms: 1.0.0 @@ -21061,9 +21172,9 @@ snapshots: strip-json-comments@3.1.1: {} - strip-literal@2.1.0: + strip-literal@2.1.1: dependencies: - js-tokens: 9.0.0 + js-tokens: 9.0.1 strnum@1.0.5: {} @@ -21071,12 +21182,12 @@ snapshots: dependencies: inline-style-parser: 0.1.1 - styled-jsx@5.1.1(@babel/core@7.25.9)(react@18.3.1): + styled-jsx@5.1.1(@babel/core@7.26.0)(react@18.3.1): dependencies: client-only: 0.0.1 react: 18.3.1 optionalDependencies: - '@babel/core': 7.25.9 + '@babel/core': 7.26.0 styled-system@5.1.5: dependencies: @@ -21100,7 +21211,7 @@ snapshots: sucrase@3.35.0: dependencies: - '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/gen-mapping': 0.3.8 commander: 4.1.1 glob: 10.4.5 lines-and-columns: 1.2.4 @@ -21132,7 +21243,7 @@ snapshots: dependencies: define-data-property: 1.1.4 define-properties: 1.2.1 - es-abstract: 1.23.3 + es-abstract: 1.23.5 es-errors: 1.3.0 function-bind: 1.1.2 globalthis: 1.0.4 @@ -21141,21 +21252,18 @@ snapshots: swap-case@2.0.2: dependencies: - tslib: 2.8.0 - - symbol-observable@4.0.0: - optional: true + tslib: 2.8.1 symbol-tree@3.2.4: {} synckit@0.9.2: dependencies: '@pkgr/core': 0.1.1 - tslib: 2.8.0 + tslib: 2.8.1 tabbable@6.2.0: {} - tailwindcss@3.4.15(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)): + tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)): dependencies: '@alloc/quick-lru': 5.2.0 arg: 5.0.2 @@ -21166,7 +21274,7 @@ snapshots: glob-parent: 6.0.2 is-glob: 4.0.3 jiti: 1.21.6 - lilconfig: 2.1.0 + lilconfig: 3.1.3 micromatch: 4.0.8 normalize-path: 3.0.0 object-hash: 3.0.0 @@ -21174,10 +21282,10 @@ snapshots: postcss: 8.4.49 postcss-import: 15.1.0(postcss@8.4.49) postcss-js: 4.0.1(postcss@8.4.49) - postcss-load-config: 4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3)) + postcss-load-config: 
4.0.2(postcss@8.4.49)(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) postcss-nested: 6.2.0(postcss@8.4.49) postcss-selector-parser: 6.1.2 - resolve: 1.22.8 + resolve: 1.22.9 sucrase: 3.35.0 transitivePeerDependencies: - ts-node @@ -21204,15 +21312,15 @@ snapshots: text-table@0.2.0: {} - theme-ui@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1): + theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1): dependencies: - '@emotion/react': 11.13.3(@types/react@18.3.12)(react@18.3.1) - '@theme-ui/color-modes': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/components': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(@theme-ui/theme-provider@0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1))(react@18.3.1) - '@theme-ui/core': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/css': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1)) - '@theme-ui/global': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) - '@theme-ui/theme-provider': 0.17.1(@emotion/react@11.13.3(@types/react@18.3.12)(react@18.3.1))(react@18.3.1) + '@emotion/react': 11.14.0(@types/react@18.3.16)(react@18.3.1) + '@theme-ui/color-modes': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/components': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(@theme-ui/theme-provider@0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1))(react@18.3.1) + '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1)) + '@theme-ui/global': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) + '@theme-ui/theme-provider': 
0.17.1(@emotion/react@11.14.0(@types/react@18.3.16)(react@18.3.1))(react@18.3.1) react: 18.3.1 thenify-all@1.6.0: @@ -21248,7 +21356,7 @@ snapshots: tinyexec@0.3.1: {} - tinyglobby@0.2.9: + tinyglobby@0.2.10: dependencies: fdir: 6.4.2(picomatch@4.0.2) picomatch: 4.0.2 @@ -21259,7 +21367,7 @@ snapshots: title-case@3.0.3: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 title@3.5.3: dependencies: @@ -21293,7 +21401,7 @@ snapshots: tough-cookie@4.1.4: dependencies: - psl: 1.9.0 + psl: 1.15.0 punycode: 2.3.1 universalify: 0.2.0 url-parse: 1.5.10 @@ -21314,9 +21422,9 @@ snapshots: trough@2.2.0: {} - ts-api-utils@1.3.0(typescript@5.6.3): + ts-api-utils@1.4.3(typescript@5.7.2): dependencies: - typescript: 5.6.3 + typescript: 5.7.2 ts-dedent@2.2.0: {} @@ -21324,31 +21432,26 @@ snapshots: ts-interface-checker@0.1.13: {} - ts-invariant@0.10.3: - dependencies: - tslib: 2.8.0 - optional: true - ts-morph@23.0.0: dependencies: '@ts-morph/common': 0.24.0 code-block-writer: 13.0.3 - ts-node@10.9.2(@types/node@22.7.8)(typescript@5.6.3): + ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.11 '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.4 - '@types/node': 22.7.8 - acorn: 8.13.0 + '@types/node': 22.10.2 + acorn: 8.14.0 acorn-walk: 8.3.4 arg: 4.1.3 create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.6.3 + typescript: 5.7.2 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 @@ -21373,31 +21476,31 @@ snapshots: tslib@2.6.3: {} - tslib@2.8.0: {} + tslib@2.8.1: {} tsort@0.0.1: {} - tsup@8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.6.0): + tsup@8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1): dependencies: bundle-require: 5.0.0(esbuild@0.24.0) cac: 6.7.14 chokidar: 4.0.1 consola: 3.2.3 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) esbuild: 0.24.0 joycon: 3.1.1 picocolors: 1.1.1 - 
postcss-load-config: 6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.0) + postcss-load-config: 6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1) resolve-from: 5.0.0 - rollup: 4.24.0 + rollup: 4.28.1 source-map: 0.8.0-beta.0 sucrase: 3.35.0 tinyexec: 0.3.1 - tinyglobby: 0.2.9 + tinyglobby: 0.2.10 tree-kill: 1.2.2 optionalDependencies: postcss: 8.4.49 - typescript: 5.6.3 + typescript: 5.7.2 transitivePeerDependencies: - jiti - supports-color @@ -21465,45 +21568,46 @@ snapshots: typed-array-buffer@1.0.2: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 es-errors: 1.3.0 is-typed-array: 1.1.13 typed-array-byte-length@1.0.1: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 + gopd: 1.2.0 + has-proto: 1.2.0 is-typed-array: 1.1.13 - typed-array-byte-offset@1.0.2: + typed-array-byte-offset@1.0.3: dependencies: available-typed-arrays: 1.0.7 - call-bind: 1.0.7 + call-bind: 1.0.8 for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 + gopd: 1.2.0 + has-proto: 1.2.0 is-typed-array: 1.1.13 + reflect.getprototypeof: 1.0.8 - typed-array-length@1.0.6: + typed-array-length@1.0.7: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 + gopd: 1.2.0 is-typed-array: 1.1.13 possible-typed-array-names: 1.0.0 + reflect.getprototypeof: 1.0.8 typedarray@0.0.6: {} - typescript@5.4.2: {} + typescript@5.5.4: {} - typescript@5.6.3: {} + typescript@5.7.2: {} typy@3.3.0: {} - uWebSockets.js@https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/442087c0a01bf146acb7386910739ec81df06700: + uWebSockets.js@https://codeload.github.com/uNetworking/uWebSockets.js/tar.gz/6609a88ffa9a16ac5158046761356ce03250a0df: optional: true ua-parser-js@1.0.39: {} @@ -21512,10 +21616,10 @@ snapshots: unbox-primitive@1.0.2: dependencies: - call-bind: 1.0.7 + call-bind: 1.0.8 has-bigints: 1.0.2 - has-symbols: 1.0.3 - which-boxed-primitive: 1.0.2 + has-symbols: 1.1.0 + which-boxed-primitive: 1.1.0 
unc-path-regex@0.1.2: {} @@ -21523,13 +21627,13 @@ snapshots: undici-types@5.26.5: {} - undici-types@6.19.8: {} + undici-types@6.20.0: {} undici@5.28.4: dependencies: '@fastify/busboy': 2.1.1 - unenv-nightly@2.0.0-20241024-111401-d4156ac: + unenv-nightly@2.0.0-20241204-140205-a5d5190: dependencies: defu: 6.1.4 ohash: 1.1.4 @@ -21546,10 +21650,10 @@ snapshots: '@types/concat-stream': 2.0.3 '@types/debug': 4.1.12 '@types/is-empty': 1.2.3 - '@types/node': 18.19.58 + '@types/node': 18.19.68 '@types/unist': 2.0.11 concat-stream: 2.0.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) fault: 2.0.1 glob: 8.1.0 ignore: 5.3.2 @@ -21564,7 +21668,7 @@ snapshots: vfile-message: 3.1.4 vfile-reporter: 7.0.5 vfile-statistics: 2.0.1 - yaml: 2.6.0 + yaml: 2.6.1 transitivePeerDependencies: - supports-color @@ -21697,7 +21801,7 @@ snapshots: unist-util-is: 6.0.0 unist-util-visit-parents: 6.0.1 - universal-cookie@7.2.1: + universal-cookie@7.2.2: dependencies: '@types/cookie': 0.6.0 cookie: 0.7.2 @@ -21714,19 +21818,19 @@ snapshots: unpipe@1.0.0: {} - update-browserslist-db@1.1.1(browserslist@4.24.2): + update-browserslist-db@1.1.1(browserslist@4.24.3): dependencies: - browserslist: 4.24.2 + browserslist: 4.24.3 escalade: 3.2.0 picocolors: 1.1.1 upper-case-first@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 upper-case@2.0.2: dependencies: - tslib: 2.8.0 + tslib: 2.8.1 uri-js@4.4.1: dependencies: @@ -21739,28 +21843,28 @@ snapshots: urlpattern-polyfill@10.0.0: {} - use-callback-ref@1.3.2(@types/react@18.3.12)(react@18.3.1): + use-callback-ref@1.3.2(@types/react@18.3.16)(react@18.3.1): dependencies: react: 18.3.1 - tslib: 2.8.0 + tslib: 2.8.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - use-isomorphic-layout-effect@1.1.2(@types/react@18.3.12)(react@18.3.1): + use-isomorphic-layout-effect@1.2.0(@types/react@18.3.16)(react@18.3.1): dependencies: react: 18.3.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 
18.3.16 - use-sidecar@1.1.2(@types/react@18.3.12)(react@18.3.1): + use-sidecar@1.1.2(@types/react@18.3.16)(react@18.3.1): dependencies: detect-node-es: 1.1.0 react: 18.3.1 - tslib: 2.8.0 + tslib: 2.8.1 optionalDependencies: - '@types/react': 18.3.12 + '@types/react': 18.3.16 - use-sync-external-store@1.2.2(react@18.3.1): + use-sync-external-store@1.4.0(react@18.3.1): dependencies: react: 18.3.1 @@ -21848,7 +21952,7 @@ snapshots: '@types/d3-interpolate': 3.0.4 '@types/d3-scale': 4.0.8 '@types/d3-shape': 3.1.6 - '@types/d3-time': 3.0.3 + '@types/d3-time': 3.0.4 '@types/d3-timer': 3.0.2 d3-array: 3.2.4 d3-ease: 3.0.1 @@ -21858,13 +21962,13 @@ snapshots: d3-time: 3.1.0 d3-timer: 3.0.1 - vite-node@1.6.0(@types/node@22.7.8): + vite-node@1.6.0(@types/node@22.10.2): dependencies: cac: 6.7.14 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.1.1 - vite: 5.4.9(@types/node@22.7.8) + vite: 5.4.11(@types/node@22.10.2) transitivePeerDependencies: - '@types/node' - less @@ -21876,16 +21980,16 @@ snapshots: - supports-color - terser - vite@5.4.9(@types/node@22.7.8): + vite@5.4.11(@types/node@22.10.2): dependencies: esbuild: 0.21.5 postcss: 8.4.49 - rollup: 4.24.0 + rollup: 4.28.1 optionalDependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 fsevents: 2.3.3 - vitest@1.6.0(@types/node@22.7.8)(jsdom@24.1.3): + vitest@1.6.0(@types/node@22.10.2)(jsdom@24.1.3): dependencies: '@vitest/expect': 1.6.0 '@vitest/runner': 1.6.0 @@ -21894,21 +21998,21 @@ snapshots: '@vitest/utils': 1.6.0 acorn-walk: 8.3.4 chai: 4.5.0 - debug: 4.3.7(supports-color@8.1.1) + debug: 4.4.0(supports-color@8.1.1) execa: 8.0.1 - local-pkg: 0.5.0 - magic-string: 0.30.12 + local-pkg: 0.5.1 + magic-string: 0.30.15 pathe: 1.1.2 picocolors: 1.1.1 - std-env: 3.7.0 - strip-literal: 2.1.0 + std-env: 3.8.0 + strip-literal: 2.1.1 tinybench: 2.9.0 tinypool: 0.8.4 - vite: 5.4.9(@types/node@22.7.8) - vite-node: 1.6.0(@types/node@22.7.8) + vite: 5.4.11(@types/node@22.10.2) 
+ vite-node: 1.6.0(@types/node@22.10.2) why-is-node-running: 2.3.0 optionalDependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 jsdom: 24.1.3 transitivePeerDependencies: - less @@ -21950,7 +22054,7 @@ snapshots: whatwg-mimetype@4.0.0: {} - whatwg-url@14.0.0: + whatwg-url@14.1.0: dependencies: tr46: 5.0.0 webidl-conversions: 7.0.0 @@ -21966,28 +22070,29 @@ snapshots: tr46: 1.0.1 webidl-conversions: 4.0.2 - which-boxed-primitive@1.0.2: + which-boxed-primitive@1.1.0: dependencies: - is-bigint: 1.0.4 - is-boolean-object: 1.1.2 - is-number-object: 1.0.7 - is-string: 1.0.7 - is-symbol: 1.0.4 + is-bigint: 1.1.0 + is-boolean-object: 1.2.1 + is-number-object: 1.1.0 + is-string: 1.1.0 + is-symbol: 1.1.1 - which-builtin-type@1.1.4: + which-builtin-type@1.2.1: dependencies: + call-bound: 1.0.2 function.prototype.name: 1.1.6 has-tostringtag: 1.0.2 is-async-function: 2.0.0 - is-date-object: 1.0.5 - is-finalizationregistry: 1.0.2 + is-date-object: 1.1.0 + is-finalizationregistry: 1.1.0 is-generator-function: 1.0.10 - is-regex: 1.1.4 - is-weakref: 1.0.2 + is-regex: 1.2.1 + is-weakref: 1.1.0 isarray: 2.0.5 - which-boxed-primitive: 1.0.2 + which-boxed-primitive: 1.1.0 which-collection: 1.0.2 - which-typed-array: 1.1.15 + which-typed-array: 1.1.16 which-collection@1.0.2: dependencies: @@ -21998,12 +22103,12 @@ snapshots: which-module@2.0.1: {} - which-typed-array@1.1.15: + which-typed-array@1.1.16: dependencies: available-typed-arrays: 1.0.7 - call-bind: 1.0.7 + call-bind: 1.0.8 for-each: 0.3.3 - gopd: 1.0.1 + gopd: 1.2.0 has-tostringtag: 1.0.2 which@1.3.1: @@ -22035,26 +22140,26 @@ snapshots: wkx@0.5.0: dependencies: - '@types/node': 22.7.8 + '@types/node': 22.10.2 wonka@6.3.4: {} word-wrap@1.2.5: {} - workerd@1.20241106.1: + workerd@1.20241205.0: optionalDependencies: - '@cloudflare/workerd-darwin-64': 1.20241106.1 - '@cloudflare/workerd-darwin-arm64': 1.20241106.1 - '@cloudflare/workerd-linux-64': 1.20241106.1 - '@cloudflare/workerd-linux-arm64': 1.20241106.1 - 
'@cloudflare/workerd-windows-64': 1.20241106.1 + '@cloudflare/workerd-darwin-64': 1.20241205.0 + '@cloudflare/workerd-darwin-arm64': 1.20241205.0 + '@cloudflare/workerd-linux-64': 1.20241205.0 + '@cloudflare/workerd-linux-arm64': 1.20241205.0 + '@cloudflare/workerd-windows-64': 1.20241205.0 workerpool@6.5.1: {} - wrangler@3.87.0(@cloudflare/workers-types@4.20241112.0): + wrangler@3.95.0(@cloudflare/workers-types@4.20241205.0): dependencies: '@cloudflare/kv-asset-handler': 0.3.4 - '@cloudflare/workers-shared': 0.7.1 + '@cloudflare/workers-shared': 0.11.0 '@esbuild-plugins/node-globals-polyfill': 0.2.3(esbuild@0.17.19) '@esbuild-plugins/node-modules-polyfill': 0.2.2(esbuild@0.17.19) blake3-wasm: 2.1.5 @@ -22062,18 +22167,17 @@ snapshots: date-fns: 4.1.0 esbuild: 0.17.19 itty-time: 1.0.6 - miniflare: 3.20241106.0 - nanoid: 3.3.7 + miniflare: 3.20241205.0 + nanoid: 3.3.8 path-to-regexp: 6.3.0 - resolve: 1.22.8 - resolve.exports: 2.0.2 + resolve: 1.22.9 selfsigned: 2.4.1 source-map: 0.6.1 - unenv: unenv-nightly@2.0.0-20241024-111401-d4156ac - workerd: 1.20241106.1 - xxhash-wasm: 1.0.2 + unenv: unenv-nightly@2.0.0-20241204-140205-a5d5190 + workerd: 1.20241205.0 + xxhash-wasm: 1.1.0 optionalDependencies: - '@cloudflare/workers-types': 4.20241112.0 + '@cloudflare/workers-types': 4.20241205.0 fsevents: 2.3.3 transitivePeerDependencies: - bufferutil @@ -22114,7 +22218,7 @@ snapshots: xtend@4.0.2: {} - xxhash-wasm@1.0.2: {} + xxhash-wasm@1.1.0: {} y18n@4.0.3: {} @@ -22129,7 +22233,7 @@ snapshots: yaml@1.10.2: {} - yaml@2.6.0: {} + yaml@2.6.1: {} yargs-parser@18.1.3: dependencies: @@ -22195,18 +22299,10 @@ snapshots: mustache: 4.2.0 stacktracey: 2.1.8 - zen-observable-ts@1.2.5: - dependencies: - zen-observable: 0.8.15 - optional: true - - zen-observable@0.8.15: - optional: true - - zod-validation-error@3.4.0(zod@3.23.8): + zod-validation-error@3.4.0(zod@3.24.1): dependencies: - zod: 3.23.8 + zod: 3.24.1 - zod@3.23.8: {} + zod@3.24.1: {} zwitch@2.0.4: {} diff --git 
a/website/package.json b/website/package.json index 416820ed28b8..35e25ac8485e 100644 --- a/website/package.json +++ b/website/package.json @@ -4,24 +4,25 @@ "type": "module", "private": true, "scripts": { - "build": "rm -rf .next && rm -rf out && next build", "dev": "next", + "build": "rm -rf .next && rm -rf out && next build", "fetch-remote-filepaths": "tsx scripts/fetch-remote-filepaths.ts", - "postbuild": "next-sitemap --config next-sitemap.config.mjs && node scripts/sitemap-ci.js", - "prebuild": "pnpm fetch-remote-filepaths", + "fix-pages-structure": "tsx scripts/fix-pages-structure.ts", "predev": "pnpm fetch-remote-filepaths", + "prebuild": "pnpm fetch-remote-filepaths && pnpm fix-pages-structure", + "postbuild": "next-sitemap --config next-sitemap.config.mjs && node scripts/sitemap-ci.js", "typecheck": "tsc --noEmit" }, "dependencies": { - "@edgeandnode/common": "^6.30.0", - "@edgeandnode/gds": "~5.34.0", - "@edgeandnode/go": "~6.60.0", - "@emotion/react": "^11.13.3", + "@edgeandnode/common": "^6.38.0", + "@edgeandnode/gds": "^5.39.1", + "@edgeandnode/go": "^6.74.0", + "@emotion/react": "^11.14.0", "@graphprotocol/contracts": "6.2.1", "@graphprotocol/nextra-theme": "workspace:*", "@phosphor-icons/react": "^2.1.7", "mixpanel-browser": "^2.56.0", - "next": "^14.2.18", + "next": "^14.2.20", "next-seo": "^6.6.0", "next-sitemap": "^4.2.3", "nextra": "^2.13.4", @@ -35,13 +36,13 @@ "@graphprotocol/client-cli": "3.0.3", "@types/mdast": "^4.0.4", "@types/mixpanel-browser": "^2.50.2", - "@types/react": "^18.3.12", - "@types/react-dom": "^18.3.1", + "@types/react": "^18.3.16", + "@types/react-dom": "^18.3.5", "autoprefixer": "^10.4.20", "fast-xml-parser": "^4.5.0", "graphql": "^16.9.0", "postcss": "^8.4.49", - "tailwindcss": "^3.4.15", + "tailwindcss": "^3.4.16", "tsx": "^4.19.2", "unified": "^11.0.5" } diff --git a/website/pages/ar/_meta.js b/website/pages/ar/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/ar/_meta.js +++ 
b/website/pages/ar/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 327809be6460..000000000000 --- a/website/pages/ar/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. 
Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph اسم** - بالاضافة لاسم الحساب الذي تم إنشاء الـ subgraph ضمنه، سيؤدي هذا أيضا إلى تعريف اسم النمط `account-name/subgraph-name` - المستخدم لعمليات النشر و GraphQL endpoints. _لا يمكن تغيير هذا الحقل لاحقا._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. 
The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. 
- -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/ar/deploying/hosted-service.mdx b/website/pages/ar/deploying/hosted-service.mdx deleted file mode 100644 index 73e4e778675c..000000000000 --- a/website/pages/ar/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: ما هي الخدمة المستضافة (Hosted Service)؟ ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## إنشاء الـ Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### من عقد موجود - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### من مثال Subgraph - -الوضع الثاني الذي يدعمه `graph init` هو إنشاء مشروع جديد من مثال subgraph. الأمر التالي يقوم بهذا: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). 
diff --git a/website/pages/ar/deploying/subgraph-studio.mdx b/website/pages/ar/deploying/subgraph-studio.mdx deleted file mode 100644 index e88c2912787b..000000000000 --- a/website/pages/ar/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -مرحبًا بك في منصة الاطلاق الجديدة الخاصة بك 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- إنشاء subgraph من خلال Studio UI -- نشر subgraph باستخدام CLI -- نشر subgraph باستخدام Studio UI -- اختبار الـ subgraph في playground -- دمجه في المنصة باستخدام عنوان URL الاستعلام -- إنشاء وإدارة مفاتيح API الخاصة بك لـ subgraphs محددة - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- التحكم في حساب المستخدم الخاص بك -- قائمة من الـ subgraphs التي قمت بإنشائها -- A section to manage, view details and visualize the status of a specific subgraph -- قسم لإدارة مفاتيح API الخاصة بك والتي ستحتاجها للاستعلام عن subgraph -- قسم لإدارة الفواتير الخاصة بك - -## كيف تنشئ حسابك - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## توافق الـ Subgraph مع شبكة The Graph - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- يجب ألا تستخدم أيًا من الميزات التالية: - - ipfs.cat & ipfs.map - - أخطاء غير فادحة - - تطعيم(Grafting) - -المزيد من الميزات والشبكات ستتم إضافتها إلى شبكة The Graph بشكل متزايد. - -### تدفق دورة حياة الـ Subgraph - -![دورة حياة الـ Subgraph](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## اختبار الـ Subgraph الخاص بك في Subgraph Studio - -إذا كنت ترغب في اختبار الـ subgraph قبل نشره على الشبكة ، فيمكنك القيام بذلك في Subgraph **Playground** أو إلقاء نظرة على سجلاتك. ستخبرك سجلات الـ Subgraph ** أين ** يفشل الـ subgraph في حالة حدوث ذلك. - -## نشر الـ Subgraph الخاص بك في Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -الـ Subgraphs التي لها إشارة تنسيق يتم عرضها للمفهرسين بحيث يمكن فهرستها على الشبكة اللامركزية. يمكنك نشر الـ subgraphs والإشارة إليها في إجراء واحد ، مما يتيح لك صك أول إشارة تنسيق على الـ subgraph وتوفير تكاليف الغاز. ومن خلال إضافة إشارتك إلى الإشارة التي قدمها المنسقون لاحقًا ، سيكون للـ subgraph الخاص بك فرصة أكبر لتقديم الاستعلامات في النهاية. - -** الآن بعد أن نشرت الـ subgraph الخاص بك ، دعنا ندخل في كيفية إدارتهم على أساس منتظم. ** لاحظ أنه لا يمكنك نشر الـ subgraph على الشبكة إذا قد فشلت المزامنة. يحدث هذا عادةً بسبب وجود أخطاء في الـ subgraph - ستخبرك السجلات بمكان وجود هذه المشكلات! - -## تعديل إصدار الـ Subgraph الخاص بك باستخدام CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. 
- -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### الأرشفة التلقائية لإصدارات الـ Subgraph - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio -إلغاء أرشفة](/img/Unarchive.png) diff --git a/website/pages/ar/developing/creating-a-subgraph.mdx b/website/pages/ar/developing/creating-a-subgraph.mdx deleted file mode 100644 index 499b60d49bb9..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: إنشاء subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![Defining a Subgraph](/img/defining-a-subgraph.png) - -يتكون تعريف Subgraph من عدة ملفات: - -- `Subgraph.yaml`ملف YAML يحتوي على Subgraph manifest - -- `schema.graphql`: مخطط GraphQL يحدد البيانات المخزنة في Subgraph وكيفية الاستعلام عنها عبر GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) كود يترجم من بيانات الحدث إلى الكيانات المعرفة في مخططك (مثل`mapping.ts` في هذا الدرس) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## قم بتثبيت Graph CLI - -تمت كتابة Graph CLI بلغة JavaScript ، وستحتاج إلى تثبيت إما `yarn` أو `npm` لاستخدامها ؛ ومن المفترض أن يكون لديك yarn كالتالي. - -بمجرد حصولك على `yarn` ، قم بتثبيت Graph CLI عن طريق تشغيل - -**التثبيت بواسطة yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**التثبيت بواسطة npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## من عقد موجود - -الأمر التالي ينشئ subgraph يفهرس كل الأحداث للعقد الموجود. 
إنه يحاول جلب ABI للعقد من Etherscan ويعود إلى طلب مسار ملف محلي. إذا كانت أي من arguments الاختيارية مفقودة ، فسيأخذك عبر نموذج تفاعلي. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` هو ID لـ subgraph الخاص بك في Subgraph Studio ، ويمكن العثور عليه في صفحة تفاصيل الـ subgraph. - -## من مثال Subgraph - -الوضع الثاني الذي يدعمه `graph init` هو إنشاء مشروع جديد من مثال subgraph. الأمر التالي يقوم بهذا: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -Subgraph manifest `subgraph.yaml` تحدد العقود الذكية لفهارس الـ subgraph الخاص بك ، والأحداث من هذه العقود التي يجب الانتباه إليها ، وكيفية عمل map لبيانات الأحداث للكيانات التي تخزنها Graph Node وتسمح بالاستعلام عنها. يمكن العثور على المواصفات الكاملة لـ subgraph manifests [ هنا ](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -بالنسبة لمثال الـ subgraph ،يكون الـ `subgraph.yaml`: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -الإدخالات الهامة لتحديث manifest هي: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: قائمة بجميع أسماء الـ [ الميزات](#experimental-features) المستخدمة. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: ملف ABI واحد أو أكثر لعقد المصدر بالإضافة إلى العقود الذكية الأخرى والتي تتفاعل معها من داخل الـ mappings. - -- `dataSources.mapping.eventHandlers`: يضع قائمة بأحداث العقود الذكية والتي يتفاعل معها هذا الـ subgraph ويعالجها في (mapping) الـتخطيط —./src/mapping.ts في المثال - والتي تحول هذه الأحداث إلى كيانات في المخزن. - -- `dataSources.mapping.callHandlers`: يضع قائمة بدوال العقود الذكية والتي يتفاعل معها هذا الـ subgraph ويعالجها في الـ mapping التي تحول المدخلات والمخرجات لاستدعاءات الدوال إلى كيانات في المخزن. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. 
This will only run the handler if the block contains at least one call to the data source contract. - -يمكن لـ subgraph واحد فهرسة البيانات من عقود ذكية متعددة. أضف إدخالا لكل عقد يجب فهرسة البيانات منه إلى مصفوفة `dataSources`. - -### Order of Triggering Handlers - -يتم ترتيب المشغلات (triggers) لمصدر البيانات داخل الكتلة باستخدام العملية التالية: - -1. يتم ترتيب triggers الأحداث والاستدعاءات أولا من خلال فهرس الإجراء داخل الكتلة. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. يتم تشغيل مشغلات الكتلة بعد مشغلات الحدث والاستدعاء، بالترتيب المحدد في الـ manifest. - -قواعد الترتيب هذه عرضة للتغيير. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. 
- -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. 
An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. 
- -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). 
| -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### الحصول على ABIs - -يجب أن تتطابق ملف (ملفات) ABI مع العقد (العقود) الخاصة بك. هناك عدة طرق للحصول على ملفات ABI: - -- إذا كنت تقوم ببناء مشروعك الخاص ، فمن المحتمل أن تتمكن من الوصول إلى أحدث ABIs. -- إذا كنت تقوم ببناء subgraph لمشروع عام ، فيمكنك تنزيل هذا المشروع على جهاز الكمبيوتر الخاص بك والحصول على ABI باستخدام [ `truffle compile` ](https://truffleframework.com/docs/truffle/overview) أو استخدام solc للترجمة. -- يمكنك أيضا العثور على ABI على [ Etherscan ](https://etherscan.io/) ، ولكن هذا ليس موثوقا به دائما ، حيث قد يكون ABI الذي تم تحميله هناك قديما. تأكد من أن لديك ABI الصحيح ، وإلا فإن تشغيل الـ subgraph الخاص بك سيفشل. - -## مخطط GraphQL - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## تعريف الكيانات - -قبل تعريف الكيانات ، من المهم التراجع والتفكير في كيفية هيكلة بياناتك وربطها. سيتم إجراء جميع الاستعلامات لنموذج البيانات المعرفة في مخطط الـ subgraph والكيانات المفهرسة بواسطة الـ subgraph. لهذا السبب ، من الجيد تعريف مخطط الـ subgraph بطريقة تتوافق مع احتياجات الـ dapp الخاص بك. قد يكون من المفيد تصور الكيانات على أنها "كائنات (objects) تحتوي على بيانات" ، وليس أحداثا أو دوال. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. 
Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### مثال جيد - -تم تنظيم الكيان `Gravatar` أدناه حول كائن Gravatar وهو مثال جيد لكيفية تعريف الكيان. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### مثال سيئ - -يستند مثالان الكيانات أدناه `GravatarAccepted` و `GravatarDeclined` إلى أحداث. لا يوصى بعمل map للأحداث أو لاستدعاءات الدوال للكيانات 1: 1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### الحقول الاختيارية والمطلوبة - -يمكن تعريف حقول الكيانات على أنها مطلوبة أو اختيارية. الحقول المطلوبة يشار إليها بواسطة `!` في المخطط. إذا لم يتم تعيين حقل مطلوب في الـ mapping ، فستتلقى هذا الخطأ عند الاستعلام عن الحقل: - -``` -'Null value resolved for non-null field 'name -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. 
The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### أنواع المقاييس المضمنة - -#### المقاييس المدعومة من GraphQL - -We support the following scalars in our GraphQL API: - -| النوع | الوصف | -| --- | --- | -| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. 
Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### علاقات الكيانات - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. - -#### العلاقات واحد-لواحد - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### علاقات واحد-لمتعدد - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### البحث العكسي - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. 
Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### مثال - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### علاقات متعدد_لمتعدد - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### مثال - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! 
@derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### إضافة تعليقات إلى المخطط (schema) - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## تعريف حقول البحث عن النص الكامل - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. 
- -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[ إدارة الميزات ](#experimental-features): ** من `specVersion` `0.0.4` وما بعده ، يجب الإعلان عن `fullTextSearch` ضمن قسم `features` في subgraph manifest. - -### اللغات المدعومة - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". 
- -Supported language dictionaries: - -| الرمز | القاموس | -| ------ | ---------- | -| simple | عام | -| da | دنماركي | -| nl | هولندي | -| en | إنجليزي | -| fi | فنلندي | -| fr | فرنسي | -| de | ألماني | -| hu | مجري | -| it | إيطالي | -| no | نرويجي | -| pt | Portuguese | -| ro | روماني | -| ru | روسي | -| es | إسباني | -| sv | سويدي | -| tr | تركي | - -### خوارزميات التصنيف - -Supported algorithms for ordering results: - -| الخوارزمية | الوصف | -| ------------- | ------------------------------------------------------------ | -| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | -| proximityRank | مشابه لـ rank ولكنه يشمل أيضا القرب من المطابقات. | - -## كتابة الـ Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
- -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### الـ IDs الموصى بها لإنشاء كيانات جديدة - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. 
Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## توليد الكود - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -'import { Gravatar } from '../generated/schema -``` - -> **ملحوظات:** يجب إجراء إنشاء الكود مرة أخرى بعد كل تغيير في مخطط GraphQL أو ABI المضمنة في الـ يظهر. يجب أيضا إجراؤه مرة واحدة على الأقل قبل بناء أو نشر الـ الفرعيةرسم بياني. - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## قوالب مصدر البيانات - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### مصدر البيانات للعقد الرئيسي - -First, you define a regular data source for the main contract. 
The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### قوالب مصدر البيانات للعقود التي تم إنشاؤها ديناميكيا - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... 
-templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### إنشاء قالب مصدر البيانات - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **ملاحظة:** مصدر البيانات الجديد سيعالج فقط الاستدعاءات والأحداث للكتلة التي تم إنشاؤها فيه وجميع الكتل التالية ، ولكنه لن يعالج البيانات التاريخية ، أي البيانات الموجودة في الكتل السابقة. -> -> إذا كانت الكتل السابقة تحتوي على بيانات ذات صلة بمصدر البيانات الجديد ، فمن الأفضل فهرسة تلك البيانات من خلال قراءة الحالة الحالية للعقد وإنشاء كيانات تمثل تلك الحالة في وقت إنشاء مصدر البيانات الجديد. - -### سياق مصدر البيانات - -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. 
That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## كتل البدء - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **ملاحظة:** يمكن البحث عن كتلة إنشاء العقد بسرعة على Etherscan: -> -> 1. ابحث عن العقد بإدخال عنوانه في شريط البحث. -> 2. انقر فوق hash إجراء الإنشاء في قسم `Contract Creator`. -> 3. قم بتحميل صفحة تفاصيل الإجراء(transaction) حيث ستجد كتلة البدء لذلك العقد. 
- -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## معالجات الاستدعاء(Call Handlers) - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### تعريف معالج الاستدعاء - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### دالة الـ Mapping - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## معالجات الكتلة - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### الفلاتر المدعومة - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### دالة الـ Mapping - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## أحداث الـ مجهول - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| الميزة | الاسم | -| ---------------------------------------------------- | ---------------- | -| [أخطاء غير فادحة](#non-fatal-errors) | `nonFatalErrors` | -| [البحث عن نص كامل](#defining-fulltext-search-fields) | `fullTextSearch` | -| [تطعيم(Grafting)](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### أخطاء غير فادحة - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **ملاحظة:** لا تدعم شبكة Graph حتى الآن الأخطاء غير الفادحة ، ويجب على المطورين عدم نشر الـ subgraphs على الشبكة باستخدام تلك الدالة عبر الـ Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting على Subgraphs موجودة - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- يضيف أو يزيل أنواع الكيانات -- يزيل الصفات من أنواع الكيانات -- يضيف صفات nullable لأنواع الكيانات -- يحول صفات non-nullable إلى صفات nullable -- يضيف قيما إلى enums -- يضيف أو يزيل الواجهات -- يغير للكيانات التي يتم تنفيذ الواجهة لها - -> **[إدارة الميزات](#experimental-features):**يجب الإعلان عن `التطعيم` ضمن `features` في الفرعيةرسم بياني يظهر. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### نظره عامة - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! 
- name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### المراجع - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ar/developing/creating-a-subgraph/_meta.js b/website/pages/ar/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ar/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/developing/graph-ts/_meta.js b/website/pages/ar/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ar/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/managing/deprecate-a-subgraph.mdx b/website/pages/ar/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ar/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ar/mips-faqs.mdx b/website/pages/ar/mips-faqs.mdx deleted file mode 100644 index dfbc9049c656..000000000000 --- a/website/pages/ar/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## مقدمة - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/ar/network/_meta.js b/website/pages/ar/network/_meta.js index 3552bf25eb0b..49858537c885 100644 --- a/website/pages/ar/network/_meta.js +++ b/website/pages/ar/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: 'نظره عامة', } diff --git a/website/pages/ar/querying/_meta.js b/website/pages/ar/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ar/querying/_meta.js +++ b/website/pages/ar/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ar/querying/graph-client/_meta.js b/website/pages/ar/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ar/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/_meta.js b/website/pages/cs/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/cs/_meta.js +++ b/website/pages/cs/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' 
export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 754f59ece522..000000000000 --- a/website/pages/cs/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Nasazení podgrafu do hostované služby ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -Tato stránka vysvětluje, jak nasadit podgraf do hostované služby. K nasazení podgrafu je nejprve potřeba nainstalovat [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Pokud jste již podgraf nevytvořili, podívejte se na [vytvoření podgrafu](/developing/creating-a-subgraph). - -## Vytvoření účtu hostované služby - -Před použitím hostované služby si vytvořte účet v naší hostované službě. K tomu budete potřebovat účet [Github](https://github.com/); pokud jej nemáte, musíte si jej nejprve vytvořit. Poté přejděte do [hostované služby](https://thegraph.com/hosted-service/), klikněte na tlačítko _"Zaregistrovat se u Githubu"_ a dokončete autorizační proces Githubu. - -## Uložení přístupového tokenu - -Po vytvoření účtu přejděte do svého [nástěnky](https://thegraph.com/hosted-service/dashboard). Zkopírujte přístupový token zobrazený na nástěnce a spusťte příkaz `graph auth --product hosted-service `. Tím se přístupový token uloží na váš počítač. Toto je třeba udělat pouze jednou, nebo pokud někdy obnovíte přístupový token. - -## Vytvoření podgrafu v hostované službě - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Obrázek** - Vyberte obrázek, který se použije jako náhledový obrázek a miniatura podgrafu. 
- -**Název podgrafu** – Spolu s názvem účtu, pod kterým je podgraf vytvořen, bude také definovat styl `název-účtu/název-podgrafu` název používaný pro nasazení a koncové body GraphQL. _Toto pole nelze později změnit._ - -**Účet** – Účet, pod kterým je podgraf vytvořen. Může se jednat o účet jednotlivce nebo organizace. _Podgrafy nelze později přesouvat mezi účty._ - -**Název podgrafu** - Text, který se zobrazí na kartách podgrafů. - -**Popis** - Popis podgrafu, viditelný na stránce s podrobnostmi podgrafu. - -**GitHub URL** - Odkaz na repozitář subgrafu na GitHubu. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -Po uložení nového podgrafu se zobrazí obrazovka s nápovědou, jak nainstalovat Graf CLI, jak vygenerovat lešení pro nový podgraf a jak podgraf nasadit. První dva kroky byly popsány v části [Vytvoření podgrafu](/developing/creating-a-subgraph/). - -## Nasazení podgrafu v hostované službě - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -Podgraf nasadíte příkazem `yarn deploy`. - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -Stav podgrafu se přepne na `Synchronizováno`, jakmile uzel grafu extrahuje všechna data z historických bloků. Uzel Graph Node bude pokračovat v kontrole bloků pro váš podgraf, jakmile budou tyto bloky vytěženy. - -## Přerozdělení podgrafu - -Pokud provedete změny v definici podgrafu, například opravíte problém v mapování entit, spusťte znovu výše uvedený příkaz `yarn deploy` a nasaďte aktualizovanou verzi podgrafu. 
Jakákoli aktualizace podgrafu vyžaduje, aby uzel Graph Node znovu zaindexoval celý váš podgraf, opět počínaje blokem genesis. - -Pokud je váš dříve nasazený podgraf stále ve stavu `Synchronizuje se`, bude okamžitě nahrazen nově nasazenou verzí. Pokud je dříve nasazený podgraf již plně synchronizován, Graph Node označí nově nasazenou verzi jako `Pending Version`, synchronizuje ji na pozadí a pouze po synchronizaci nahradí aktuálně nasazenou verzi novou verzí. nová verze skončila. To zajistí, že budete mít podgraf, se kterým můžete pracovat, zatímco se synchronizuje nová verze. - -## Nasazení podgrafu do více sítí - -V některých případech budete chtít nasadit stejný podgraf do více sítí, aniž byste museli duplikovat celý jeho kód. Hlavním problémem, který s tím souvisí, je skutečnost, že smluvní adresy v těchto sítích jsou různé. - -### Použití graph-cli - -`sestavení grafu` (od `v0.29.0`) i `nasazení grafu` (od `v0.32.0`) přijímají dvě nové možnosti : - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -Pomocí volby `--network` můžete zadat konfiguraci sítě ze standardního souboru `json` (výchozí hodnota je `networks.json`) a snadno aktualizovat podgraf během vývoje. - -**Poznámka:** Příkaz `init` nyní na základě poskytnutých informací automaticky vygeneruje `networks.json`. Poté budete moci aktualizovat stávající sítě nebo přidat další sítě. 
- -Pokud nemáte soubor `networks.json`, musíte jej vytvořit ručně s následující strukturou: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Poznámka:** V konfiguračním souboru nemusíte zadávat žádné `šablony` (pokud nějaké máte), pouze ` dataSources`. Pokud jsou v souboru `subgraph.yaml` deklarovány nějaké `šablony`, jejich síť bude automaticky aktualizována na síť zadanou pomocí možnosti `--network`. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Takto by měl vypadat konfigurační soubor sítě: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Nyní můžeme spustit jeden z následujících příkazů: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Nyní jste připraveni na `yarn deploy`. 
- -**Poznámka:** Jak již bylo zmíněno dříve, od `graph-cli 0.32.0` můžete přímo spustit `yarn deploy` pomocí `--network` možnost: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Použití šablony subgraph.yaml - -Jedním z řešení pro starší verze graph-cli, které umožňují parametrizovat aspekty, jako jsou adresy smluv, je generování jeho částí pomocí šablonovacího systému, jako je [Mustache](https://mustache.github.io/) nebo [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -a - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Spolu s tím byste v manifestu nahradili název sítě a adresy zástupnými proměnnými `{{network}}` a `{{address}}` a přejmenovali manifest např. na `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Abychom vygenerovali manifest pro libovolnou síť, můžete přidat k souboru `package.json` dvě další příkazy spolu s závislostí na `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Funkční příklad najdete [zde](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Poznámka:** Tento přístup lze použít i ve složitějších situacích, kdy je třeba nahradit více než jen smluvní adresy a názvy sítí nebo kdy je třeba generovat mapování nebo ABI také ze šablon. - -## Kontrola stavu podgrafů - -Pokud se podgraf úspěšně synchronizuje, je to dobré znamení, že bude dobře fungovat navždy. Nové spouštěče v síti však mohou způsobit, že se podgraf dostane do neověřeného chybového stavu, nebo může začít zaostávat kvůli problémům s výkonem či operátory uzlů. - -Uzel Grafu vystavuje koncový bod graphql, na který se můžete zeptat a zkontrolovat stav svého podgrafu. V hostované službě je k dispozici na adrese `https://api.thegraph.com/index-node/graphql`. V místním uzlu je ve výchozím nastavení k dispozici na portu `8030/graphql`. Úplné schéma tohoto koncového bodu naleznete [zde](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Zde je příklad dotazu, který zjišťuje stav aktuální verze podgrafu: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Toto vám poskytne hodnotu `chainHeadBlock`, kterou můžete porovnat s `latestBlock` na vašem podgrafu, abyste zjistili, zda zaostává. Pole `synced` informuje, zda se podgraf kdy dostal na aktuální blok v blockchainu. Pole `health` může aktuálně nabývat hodnoty `healthy`, pokud nedošlo k žádným chybám, nebo `failed`, pokud došlo k chybě, která zastavila postup podgrafu. 
V takovém případě můžete zkontrolovat pole `fatalError` pro podrobnosti o této chybě. - -## Zásady archivace podgrafů hostovaných služeb - -Hostovaná služba je bezplatný indexer graf uzlů. Vývojáři mohou nasadit podgrafy indexující řadu sítí, které budou indexovány a zpřístupněny pro dotazování prostřednictvím graphQL. - -Pro zlepšení výkonu služby pro aktivní podgrafy bude hostovaná služba archivovat podgrafy, které jsou neaktivní. - -**Podgraf je definován jako "neaktivní", pokud byl do hostované služby nasazen před více než 45 dny a pokud v posledních 45 dnech obdržel 0 dotazů.** - -Vývojáři budou upozorněni e-mail, pokud byl některý z jejich podgrafů označen jako neaktivní, a to 7 dní před jeho odstraněním. Pokud si přejí svůj podgraf "aktivovat", mohou tak učinit dotazem v hřišti graphQL hostované služby svého podgrafu. Vývojáři mohou archivovaný podgraf kdykoli znovu nasadit, pokud je znovu potřeba. - -## Zásady archivace subgrafů Subgraph Studio - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Každý podgraf ovlivněný touto zásadou má možnost vrátit danou verzi zpět. 
diff --git a/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index ca3707130ad7..000000000000 --- a/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Nasazení podgraf do podgraf Studio ---- - -Toto jsou kroky k nasazení podgrafu do aplikace Podgraf Studio: - -- Instalace Graph CLI (pomocí yarn nebo npm) -- Vytvoření podgrafu v aplikaci podgraf Studio -- Ověření účtu pomocí CLI -- Nasazení podgraf do podgraf Studio - -## Instalace Graph CLI - -K dispozici je CLI pro nasazení podgrafů do [Podgraf Studio](https://thegraph.com/studio/). Zde jsou příkazy pro instalaci `graph-cli`. To lze provést pomocí npm nebo yarn. - -**Instalace pomocí yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalace pomocí npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Vytvoření podgrafu v aplikaci podgraf Studio - -Před nasazením skutečného podgrafu musíte vytvořit podgraf v [Subgraph Studiu](https://thegraph.com/studio/). Doporučujeme vám přečíst naši [dokumentaci k Studio](/deploying/subgraph-studio), abyste se dozvěděli více o této proceduře. - -## Inicializace podgrafu - -Po vytvoření podgrafu ve Studio podgrafu můžete inicializovat kód podgrafu pomocí tohoto příkazu: - -```bash -graph init --studio -``` - -Hodnotu `` naleznete na stránce s podrobnostmi o podgrafu v pograf Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -Po spuštění `graph init` budete vyzváni k zadání adresy smlouvy, sítě a ABI, na které se chcete dotazovat. Po provedení tohoto úkonu se na vašem místním počítači vygeneruje nová složka se základním kódem, který vám umožní začít pracovat s vaším podgraf. Poté můžete svůj podgraf dokončit a ujistit se, že funguje podle očekávání. 
- -## Autorizace grafu - -Před nasazením subgrafu do aplikace Subgraph Studio se musíte přihlásit ke svému účtu v rozhraní CLI. K tomu budete potřebovat svůj deploy klíč, který najdete na stránce "My Subgraphs" nebo na stránce s podrobnostmi o podgrafu. - -Zde je příkaz, který je třeba použít k ověření z CLI: - -```bash -graph auth --studio -``` - -## Nasazení podgraf do podgraf Studio - -Jakmile jste připraveni, můžete podgraf nasadit do podgraf Studio. Tímto krokem nezveřejníte svůj subgraf v decentralizované síti, ale pouze jej nasadíte do svého účtu ve Studiu, kde jej budete moci testovat a aktualizovat metadata. - -Zde je příkaz CLI, který je třeba použít k nasazení podgrafu. - -```bash -graph deploy --studio -``` - -Po spuštění tohoto příkazu se CLI zeptá na označení verze, můžete jej pojmenovat, jak chcete, můžete použít štítky jako `0.1` a `0.2` nebo také písmena například `uniswap-v2-0.1`. Tyto štítky budou viditelné v Průzkumníku Graf a mohou je použít kurátoři k rozhodnutí, zda chtějí v této verzi signalizovat, nebo ne, takže je vybírejte moudře. - -Po nasazení můžete subgraf otestovat v podgraf Studio pomocí hřiště, v případě potřeby nasadit další verzi, aktualizovat metadata, a až budete připraveni, publikovat subgraf v Graf Explorer. diff --git a/website/pages/cs/deploying/hosted-service.mdx b/website/pages/cs/deploying/hosted-service.mdx deleted file mode 100644 index 0c8384271c1f..000000000000 --- a/website/pages/cs/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Co je hostovaná služba? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -Tato část vás provede nasazením podgrafu do [hostované služby](https://thegraph.com/hosted-service/). - -Pokud nemáte účet v hostované službě, můžete se zaregistrovat pomocí účtu GitHub. 
Jakmile se ověříte, můžete začít vytvářet dílčí grafy prostřednictvím uživatelského rozhraní a nasazovat je z terminálu. Hostovaná služba podporuje řadu sítí, například Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum a další. - -Úplný seznam naleznete v části [Podporované sítě](/developing/supported-networks/#hosted-service). - -## Vytvoření podgrafu - -Nejprve podle pokynů [zde](/developing/creating-a-subgraph/#install-the-graph-cli) nainstalujte Graf CLI. Vytvořte podgraf předáním příkazu `graph init --produkt hostovaná služba` - -### Ze stávající smlouvy - -Pokud již máte na vybrané síti nasazený inteligentní smlouva, může být bootstrapování nového podgrafu z tohoto kontraktu dobrým způsobem, jak začít využívat hostovanou službu. - -Pomocí tohoto příkazu můžete vytvořit podgraf, který indexuje všechny události z existující smlouvy. Pokusí se načíst ABI smlouvy z průzkumníka bloků. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Kromě toho můžete použít následující nepovinné argumenty. Pokud nelze ABI načíst z průzkumníka bloků, vrátí se k požadavku na místní cestu k souboru. Pokud v příkazu chybí některý z nepovinných argumentů, projde příkaz interaktivním forma. - -```sh ---network \ ---abi \ -``` - -`` je v tomto případě jméno vašeho uživatele nebo organizace GitHub, `` je název vašeho subgrafu a `` je volitelný název adresáře, do kterého `graph init` umístí ukázkový manifest subgrafu. `` je adresa vaší existující smlouvy. `` je název sítě, v níž se smlouva nachází. `` je lokální cesta k souboru ABI smlouva. **Obě položky `--síť` a `--abi` jsou nepovinné.** - -### Z příkladu podgrafu - -Druhý režim `graf init` podporuje vytvoření nového projektu z příkladového podgrafu. 
To provede následující příkaz: - -``` -graph init --from-example --product hosted-service / [] -``` - -Ukázkový podgraf je založen na smlouvě Gravity od Dani Grant, která spravuje uživatelské avatary a vysílá události `NewGravatar` nebo `UpdateGravatar`, kdykoli jsou avatary vytvořeny nebo aktualizovány. Podgraf tyto události zpracovává tak, že zapisuje entity `Gravatar` do úložiště Uzel Graf a zajišťuje jejich aktualizaci podle událostí. Pokračujte na [subgraf manifest](/developing/creating-a-subgraph#the-subgraph-manifest), abyste lépe pochopili, kterým událostem z vašich chytrých kontraktů je třeba věnovat pozornost, mapování a další. - -### Ze smlouvy o Proxy - -Chcete-li sestavit podgraf přizpůsobený pro monitorování smlouvy Proxy, inicializujte podgraf zadáním adresy prováděcí smlouvy. Po dokončení procesu inicializace je posledním krokem aktualizace názvu sítě v souboru subgraph.yaml na adresu smlouvy Proxy. Můžete použít níže uvedený příkaz. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Podporované sítě v hostované službě - -Seznam podporovaných sítí najdete [zde](/developing/supported-networks). diff --git a/website/pages/cs/deploying/subgraph-studio.mdx b/website/pages/cs/deploying/subgraph-studio.mdx deleted file mode 100644 index f612377ac534..000000000000 --- a/website/pages/cs/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Vítejte na svém novém odpalovacím zařízení 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Vytvoření podgrafu prostřednictvím UI Studio -- Nasazení podgrafu pomocí CLI -- Publikování podgrafu pomocí UI Studio -- Vyzkoušejte ji na hřišti -- Integrujte jej do staging pomocí dotazu URL -- Vytváření a správa klíčů API pro konkrétní podgrafy - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Dotazování podgrafů generuje poplatky za dotazy, které se používají k odměňování [Indexerů](/network/indexing) v síti Graf. Pokud jste vývojářem aplikací nebo podgrafů, Studio vám umožní vytvářet lepší subgrafy, které budou sloužit k dotazování vašemu nebo vaší komunity. Studio se skládá z 5 hlavních částí: - -- Správa uživatelského účtu -- Seznam podgrafů, které jste vytvořili -- Sekce pro správu, zobrazení podrobností a vizualizaci stavu konkrétního podgrafu -- Sekce pro správu klíčů API, které budete potřebovat k dotazování podgrafu -- Sekce pro správu vyúčtování - -## Jak si vytvořit účet - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Po přihlášení se na domovské stránce účtu zobrazí váš jedinečný klíč pro nasazení. Ten vám umožní buď publikovat vaše podgrafy, nebo spravovat vaše klíče API + fakturaci. Budete mít jedinečný deploy klíč, který lze znovu vygenerovat, pokud se domníváte, že byl ohrožen. - -## Jak vytvořit podgraf v Podgraf Studio - - - -## Kompatibilita podgrafů se sítí grafů - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index [podporované sítě](/developing/supported-networks) -- Nesmí používat žádnou z následujících funkcí: - - ipfs.cat & ipfs.map - - Nefatální - - Roubování - -Další funkce & sítě budou do síť grafů přidávány postupně. 
- -### Tok životního cyklu podgrafu - -![Životní cyklus podgrafů](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testování Podgrafu v Podgraf Studio - -Pokud chcete subgraf otestovat před jeho publikováním v síti, můžete tak učinit v podgrafu **Playground** nebo se podívat do protokolů. Záznamy podgraphu vám řeknou **kde** váš podgraf selhal v případě, že se tak stane. - -## Publikování podgrafu v Podgraf Studio - -Dostali jste se až sem - gratulujeme! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Podívejte se také na níže uvedený videopřehled: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexátoři musí předkládat povinné záznamy Proof of Indexing od určitého bloku hash. 
Protože zveřejnění podgrafu je akce prováděná v řetězci, nezapomeňte, že provedení transakce může trvat až několik minut. Jakákoli adresa, kterou použijete k publikování kontraktu, bude jediná, která bude moci publikovat budoucí verze. Vybírejte proto moudře! - -Podgrafy s kurátorským signál jsou zobrazeny indexátorům, aby mohly být indexovány v decentralizované síti. Podgrafy a signál můžete publikovat v jedné transakci, což umožňuje mincovat první kurátorský signál na podgrafu a šetří náklady na plyn. Přidáním vašeho signálu k signálu, který později poskytnou kurátoři, bude mít váš podgraf také větší šanci, že nakonec obslouží dotazy. - -**Teď, když jste publikovali svůj podgraf, se podíváme na to, jak je budete pravidelně spravovat.** Všimněte si, že podgraf nemůžete publikovat do sítě, pokud se nepodařila jeho synchronizace. Obvykle je to proto, že podgraf má chyby - protokoly vám prozradí, kde se tyto problémy vyskytují! - -## Verzování podgrafu pomocí CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Upozorňujeme, že s publikováním nové verze podgrafu v síti jsou spojeny náklady. Kromě transakčních poplatků musí vývojáři financovat také část kurátorské daně za automaticky migrující signál. Novou verzi podgrafu nelze publikovat, pokud na ni kurátoři nesignalizovali. Více informací o rizicích kurátorství najdete [zde](/network/curating). - -### Automatická archivace verzí podgrafů - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Podraf Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/cs/developing/creating-a-subgraph.mdx b/website/pages/cs/developing/creating-a-subgraph.mdx deleted file mode 100644 index a6868ec9082d..000000000000 --- a/website/pages/cs/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Vytvoření podgraf ---- - -Podgraf získává data z blockchain, zpracovává je a ukládá tak, aby se na ně dalo snadno dotazovat prostřednictvím jazyka GraphQL. - -![Definování podgrafu](/img/defining-a-subgraph.png) - -Definice podgraf se skládá z několika souborů: - -- `subgraph.yaml`: soubor YAML obsahující manifest podgraf - -- `schema.graphql`: schéma GraphQL, které definuje, jaká data jsou uložena pro váš podgraf a jak se na ně dotazovat prostřednictvím jazyka GraphQL - -- `Mapování skriptů sestavy`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kód, který převádí data událostí na entity definované ve vašem schématu (např. 
`mapping.ts` v tomto tutoriálu) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Instalace Graf CLI - -Graf CLI je napsáno v jazyce JavaScript a k jeho použití je třeba nainstalovat buď `yarn`, nebo `npm`; v následujícím se předpokládá, že máte yarn. - -Jakmile budete mít `yarn`, nainstalujte Graf CLI spuštěním příkazu - -**Instalace pomocí yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalace pomocí npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## Ze stávající smlouvy - -Následující příkaz vytvoří podgraf, který indexuje všechny události existující smlouvy. Pokusí se načíst ABI smlouvy z Etherscan a vrátí se k požadavku na cestu k místnímu souboru. Pokud některý z nepovinných argumentů chybí, projde příkaz interaktivním formulářem. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` je ID vašeho podgraf ve Studio podgraph, najdete ho na stránce s podrobnostmi o podgrafu. 
- -## Z příkladu podgraf - -Druhý režim `graf init` podporuje vytvoření nového projektu z příkladového podgraf. To provede následující příkaz: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Přidání nových zdrojů dat do existujícího podgraf - -Od verze `v0.31.0` podporuje `graf-cli` přidávání nových zdrojů dat do existujícího podgrafu pomocí příkazu `graf add`. - -```sh -graph add
[] - -Možnosti: - - --abi Cesta k ABI smlouvy (výchozí: stažení z Etherscan) - --contract-name Název kontraktu (výchozí: Contract) - --merge-entities Zda sloučit entity se stejným názvem (výchozí: false) - --network-file Cesta ke konfiguračnímu souboru sítě (výchozí: "./networks.json") -``` - -Příkaz `add` načte ABI z Etherscan (pokud není zadána cesta k ABI pomocí volby `--abi`) a vytvoří nový `dataSource` stejným způsobem jako příkaz `graph init` vytvoří `dataSource` `--from-contract`, přičemž odpovídajícím způsobem aktualizuje schéma a mapování. - -Volba `--merge-entities` určuje, jak chce vývojář řešit konflikty názvů `entity` a `event`: - -- Pokud `true`: nový `dataSource` by měl používat stávající `eventHandlers` & `entity`. -- Pokud `false`: měla by být vytvořena nová entita & obsluha události s `${dataSourceName}{EventName}`. - -Smlouva `adresa` bude zapsána do souboru `networks.json` pro příslušnou síť. - -> **Poznámka:** Při použití interaktivního klienta budete po úspěšném spuštění `graf init` vyzváni k přidání nového `dataSource`. - -## Manifest podgrafu - -Manifest podgrafu `subgraph.yaml` definuje inteligentní smlouvy, které váš podgraf indexuje, kterým událostem z těchto smluv má věnovat pozornost a jak mapovat data událostí na entity, které Graf uzel ukládá a umožňuje dotazovat. Úplnou specifikaci manifestů podgrafu naleznete [zde](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Pro příklad podgraf `subgraph.yaml` je: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Důležité položky, které je třeba v manifestu aktualizovat, jsou: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: seznam všech použitých názvů [feature](#experimental-features). - -- `indexerHints.prune`: Definuje uchovávání historických blokových dat pro podgraf. Viz [prune](#prune) v sekci [indexerHints](#indexer-hints). 
- -- `dataSources.source`: adresa inteligentní smlouvy, ze které podgraf pochází, a ABI inteligentní smlouvy, která se má použít. Adresa je nepovinná; její vynechání umožňuje indexovat odpovídající události ze všech smluv. - -- `dataSources.source.startBlock`: nepovinné číslo bloku, od kterého zdroj dat začíná indexovat. Ve většině případů doporučujeme použít blok, ve kterém byl kontrakt vytvořen. - -- `dataSources.source.endBlock`: Nepovinné číslo bloku, ve kterém zdroj dat přestane indexovat, včetně tohoto bloku. Minimální požadovaná verze specifikace: `0.0.9`. - -- `dataSources.context`: páry klíč-hodnota, které lze použít v rámci mapování podgrafů. Podporuje různé datové typy, například `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` a `BigInt`. U každé proměnné je třeba uvést její `typ` a `údaj`. Tyto kontextové proměnné jsou pak přístupné v mapovacích souborech a nabízejí více konfigurovatelných možností pro vývoj podgrafů. - -- `dataSources.mapping.entities`: entity, které zdroj dat zapisuje do úložiště. Schéma pro každou entita je definováno v souboru schema.graphql. - -- `dataSources.mapping.abis`: jeden nebo více pojmenovaných souborů ABI pro zdrojový kontrakt a všechny ostatní chytré kontrakty, se kterými se pracuje v rámci mapování. - -- `dataSources.mapping.eventHandlers`: uvádí seznam událostí inteligentních smluv, na které tento podgraf reaguje, a obslužných programů v mapování - v příkladu./src/mapping.ts - které tyto události transformují na entity v úložišti. - -- `dataSources.mapping.callHandlers`: obsahuje seznam funkcí inteligentních smluv, na které tento podgraf reaguje, a obsluhovače v mapování, které transformují vstupy a výstupy volání funkcí na entity v úložišti. - -- `dataSources.mapping.blockHandlers`: seznam bloků, na které tento podgraf reaguje, a obslužných v mapování, které se spustí, když je blok přidán do řetězce. Bez filtru se obsluha bloku spustí každý blok. 
Volitelný filtr volání lze zadat přidáním field with `filter` druhem: volání k obsluze. Tím se obslužná rutina spustí pouze tehdy, pokud blok obsahuje alespoň jedno volání smlouvy zdroje dat. - -Jeden subgraf může indexovat data z více inteligentní smluv. Do pole `dataSources` přidejte položku pro každou smlouvu, ze které je třeba indexovat data. - -### Pořadí spouštěcích Handlers - -Spouštěče pro zdroj dat v rámci bloku jsou seřazeny podle následujícího postupu: - -1. Spouštěče událostí a volání jsou nejprve seřazeny podle indexu transakce v rámci bloku. -2. Spouštěče událostí a volání v rámci jedné transakce jsou seřazeny podle konvence: nejprve spouštěče událostí a poté spouštěče volání, přičemž každý typ dodržuje pořadí, v jakém jsou definovány v manifestu. -3. Spouštěče bloků jsou spuštěny po spouštěčích událostí a volání, v pořadí, v jakém jsou definovány v manifestu. - -Tato pravidla objednávání se mohou změnit. - -> **Poznámka:** Při vytvoření nového [dynamického zdroje dat](#data-source-templates-for-dynamically-created-contracts) se zpracovatelé definovaní pro dynamické zdroje dat začnou zpracovávat až po zpracování všech existujících zpracovatelů zdrojů dat a budou se opakovat ve stejném pořadí, kdykoli budou spuštěny. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: - - event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) - handler: handleSwap - calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before the colon (`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params`: - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Verze | Poznámky vydání | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`.
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Získání ABI - -Soubor(y) ABI se musí shodovat s vaší smlouvou. Soubory ABI lze získat několika způsoby: - -- Pokud vytváříte vlastní projekt, budete mít pravděpodobně přístup k nejaktuálnějším ABI. -- Pokud vytváříte podgraf pro veřejný projekt, můžete si tento projekt stáhnout do počítače a získat ABI pomocí [`truffle compile`](https://truffleframework.com/docs/truffle/overview) nebo pomocí solc pro kompilaci. -- ABI můžete také najít na stránce [Etherscan](https://etherscan.io/), ale ta není vždy spolehlivá, protože ABI, které je tam nahráno, může být zastaralé. Ujistěte se, že máte správné ABI, jinak spuštění podgrafu selže. - -## Schéma GraphQL - -Schéma vašeho podgrafu je v souboru `schema.graphql`. Schémata GraphQL se definují pomocí jazyka pro definici rozhraní GraphQL. Pokud jste ještě nikdy schéma GraphQL nepsali, doporučujeme vám přečíst si tento úvodní článek o systému typů GraphQL. Referenční dokumentaci ke schématům GraphQL naleznete v části [GraphQL API](/querying/graphql-api). - -## Definice entit - -Před definováním entit je důležité udělat krok zpět a zamyslet se nad tím, jak jsou vaše data strukturována a propojena. Všechny dotazy budou prováděny proti datovému modelu definovanému ve schématu podgrafu a entitám indexovaným podgrafem.
Z tohoto důvodu je dobré definovat schéma podgrafu způsobem, který odpovídá potřebám vaší dapp. Může být užitečné představit si entity spíše jako "objekty obsahující data" než jako události nebo funkce. - -V nástroji The Graf stačí definovat typy entit v `schema.graphql` a Graf Uzel vygeneruje pole nejvyšší úrovně pro dotazování jednotlivých instancí a kolekcí daného typu entit. Každý typ, který má být entitou, je nutné anotovat direktivou `@entity`. Ve výchozím nastavení jsou entity mutovatelné, což znamená, že mapování může načíst existující entity, upravit je a uložit novou verzi dané entity. Mutabilita má svou cenu a u typů entit, u nichž je známo, že nebudou nikdy modifikovány, například proto, že jednoduše obsahují data doslovně extrahovaná z řetězce, se doporučuje označit je jako neměnné pomocí `@entity(immutable: true)`. Mapování může provádět změny v neměnných entitách, pokud k nim dojde ve stejném bloku, ve kterém byla entita vytvořena. Neměnné entity se mnohem rychleji zapisují a dotazují, a proto by se měly používat, kdykoli je to možné. - -### Dobrý příklad - -Níže uvedená entita `Gravatar` je strukturována kolem objektu Gravatar a je dobrým příkladem toho, jak lze entitu definovat. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Špatný příklad - -Níže uvedené příklady entit `GravatarAccepted` a `GravatarDeclined` jsou založeny na událostech. Nedoporučuje se mapovat události nebo volání funkcí na entity 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Nepovinná a povinná pole - -Pole entit lze definovat jako povinná nebo nepovinná. Povinná pole jsou ve schématu označena `!`. 
Pokud není povinné pole v mapování nastaveno, zobrazí se při dotazu na toto pole tato chyba: - -``` -Null value resolved for non-null field 'name' -``` - -Každá entita musí mít pole `id`, které musí být typu `Bytes!` nebo `String!`. Obecně se doporučuje používat `Bytes!`, pokud `id` neobsahuje lidsky čitelný text, protože entity s `Bytes!` id se zapisují a dotazují rychleji než entity s `String!` `id`. Pole `id` slouží jako primární klíč a musí být jedinečné mezi všemi entitami stejného typu. Z historických důvodů je akceptován také typ `ID!`, který je synonymem pro `String!`. - -U některých typů entit je `id` vytvořeno z id dvou jiných entit; to je možné pomocí `concat`, např. `let id = left.id.concat(right.id)` pro vytvoření id z id `left` a `right`. Podobně lze použít `let id = left.id.concatI32(count)` pro vytvoření id z id existující entity a čítače `count`. Konkatenace zaručeně vytvoří jedinečné id, pokud je délka `left` pro všechny takové entity stejná, například proto, že `left.id` je `Address`. - -### Vestavěné typy skalárů - -#### Podporované skaláry GraphQL - -V našem GraphQL API podporujeme následující skaláry: - -| Typ | Popis | -| --- | --- | -| `Bytes` | Pole bajtů reprezentované jako hexadecimální řetězec. Běžně se používá pro hashe a adresy Ethereum. | -| `String` | Skalár pro hodnoty `string`. Nulové znaky nejsou podporovány a jsou automaticky odstraněny. | -| `Boolean` | Skalár pro hodnoty `boolean`. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | Celé číslo se znaménkem o velikosti 8 bajtů, známé také jako 64bitové celé číslo se znaménkem, může uchovávat hodnoty v rozsahu od -9 223 372 036 854 775 808 do 9 223 372 036 854 775 807. Přednostně se používá k reprezentaci `i64` z ethereum. | -| `BigInt` | Velká celá čísla. Používá se pro typy `uint32`, `int64`, `uint64`, ..., `uint256` společnosti Ethereum.
Poznámka: Vše pod `uint32`, jako například `int32`, `uint24` nebo `int8`, je reprezentováno jako `i32`. | -| `BigDecimal` | `BigDecimal` Desetinná čísla s vysokou přesností reprezentovaná jako signifikand a exponent. Rozsah exponentu je od -6143 do +6144. Zaokrouhleno na 34 významných číslic. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -Výčty můžete vytvářet také v rámci schématu. Syntaxe enumů je následující: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Jakmile je enum definován ve schématu, můžete použít řetězcovou reprezentaci hodnoty výčtu k nastavení pole výčtu u entity. Například můžete nastavit `tokenStatus` na `SecondOwner` tak, že nejprve definujete svou entitu a následně nastavíte pole pomocí `entity.tokenStatus = "SecondOwner"`. Níže uvedený příklad ukazuje, jak by vypadala entita Token s výčtovým polem: - -Podrobnější informace o zápisu enum najdete v dokumentaci [GraphQL](https://graphql.org/learn/schema/). - -#### Vztahy entit - -Entita může mít vztah k jedné nebo více jiným entitám ve vašem schématu. Tyto vztahy lze procházet v dotazech. Vztahy v Graf jsou jednosměrné. Obousměrné vztahy je možné simulovat definováním jednosměrného vztahu na obou "koncích" vztahu. - -Vztahy se definují u entit stejně jako u jiných polí s tím rozdílem, že zadaný typ je typ jiné entity. - -#### Vztahy jeden na jednoho - -Definujte typ entity `Transaction` s volitelným vztahem jedna ku jedné s typemem entity `TransactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Vztahy jeden k mnoha - -Definujte typ entity `TokenBalance` s požadovaným vztahem one-to-many s typem entity Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! 
-} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Zpětné vyhledávání - -Reverzní vyhledávání lze u entity definovat prostřednictvím pole `@derivedFrom`. Tím se na entitě vytvoří virtuální pole, na které se lze dotazovat, ale které nelze nastavit ručně prostřednictvím API mapování. Spíše je odvozeno ze vztahu definovaného na jiné entitě. U takových vztahů má zřídkakdy smysl ukládat obě strany vztahu a indexace i výkonnost dotazů budou lepší, když bude uložena pouze jedna strana a druhá bude odvozená. - -U vztahů typu "jeden k mnoha" by měl být vztah vždy uložen na straně "jeden" a strana "mnoho" by měla být vždy odvozena. Uložení vztahu tímto způsobem namísto uložení pole entit na straně "mnoho" povede k výrazně lepšímu výkonu jak při indexování, tak při dotazování na podgraf. Obecně platí, že ukládání polí entit je třeba se vyhnout, pokud je to praktické. - -#### Příklad - -Zůstatky token můžeme zpřístupnit z token odvozením pole `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Vztahy mnoho k mnoha - -Pro vztahy mnoho-více, jako jsou uživatelé, z nichž každý může patřit do libovolného počtu organizací, je nejjednodušší, ale obecně ne nejvýkonnější, modelovat vztah jako pole v každé z obou zúčastněných entit. Pokud je vztah symetrický, je třeba uložit pouze jednu stranu vztahu a druhou stranu lze odvodit. - -#### Příklad - -Definujte zpětné vyhledávání z typu entity `User` na typ entity `Organization`. V příkladu níže je toho dosaženo vyhledáním atributu `members` z entity `Organization`. V dotazech bude pole `organizations` na `User` vyřešeno vyhledáním všech entit `Organization`, které obsahují ID uživatele. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! 
-} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -Výkonnějším způsobem uložení tohoto vztahu je mapovací tabulka, která má pro každou dvojici `Uživatel` / `Organizace` jeden záznam se schématem, jako je např - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -Tento přístup vyžaduje, aby dotazy sestupovaly do další úrovně, aby bylo možné získat například organizace pro uživatele: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Tento propracovanější způsob ukládání vztahů mnoho-více vede k menšímu množství dat uložených pro podgraf, a tedy k podgrafu, který je často výrazně rychlejší při indexování a dotazování. - -#### Přidání komentářů do schématu - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Definování polí fulltextového vyhledávání - -Fulltextové vyhledávací dotazy filtrují a řadí entity na základě textového vyhledávacího vstupu. Fulltextové dotazy jsou schopny vracet shody podobných slov tím, že zpracovávají vstupní text dotazu do kmenů před jejich porovnáním s indexovanými textovými daty. - -Definice fulltextového dotazu obsahuje název dotazu, jazykový slovník použitý ke zpracování textových polí, algoritmus řazení použitý k seřazení výsledků a pole zahrnutá do vyhledávání. 
Každý fulltextový dotaz může zahrnovat více polí, ale všechna zahrnutá pole musí být z jednoho typu entity. - -Chcete-li přidat fulltextový dotaz, zahrňte do schématu GraphQL typ `_Schema_` s direktivou fulltext. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -Příklad pole `bandSearch` lze použít v dotazech k filtrování entit `Band` na základě textových dokumentů v polích `name`, `description` a `bio`. Přejděte na [GraphQL API - dotazy](/querying/graphql-api#queries), kde najdete popis API pro fulltextové vyhledávání a další příklady použití. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Správa funkcí](#experimental-features):** Od `specVersion` `0.0.4` musí být `fullTextSearch` deklarováno v sekci `features` v manifestu podgrafů. - -### Podporované jazyky - -Výběr jiného jazyka bude mít na rozhraní API fulltextového vyhledávání rozhodující, i když někdy nenápadný vliv. Pole zahrnutá do pole fulltextového dotazu jsou zkoumána v kontextu zvoleného jazyka, takže lexémy vytvořené analýzou a vyhledávacími dotazy se v jednotlivých jazycích liší. Například: při použití podporovaného tureckého slovníku je "token" odvozeno od "toke", zatímco anglický slovník jej samozřejmě odvozuje od "token". 
- -Podporované jazykové slovníky: - -| Kód | Slovník | -| ---------- | ---------- | -| simple | Obecné | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Algoritmy řazení - -Podporované algoritmy pro řazení výsledků: - -| Algoritmus | Popis | -| ------------- | ------------------------------------------------------------------------ | -| rank | Pro seřazení výsledků použijte kvalitu shody (0-1) fulltextového dotazu. | -| proximityRank | Podobně jako pořadí, ale zahrnuje také blízkost zápasů. | - -## Psát mapování - -Mapování přebírá data z určitého zdroje a transformuje je na entity definované ve vašem schématu. Mapování jsou zapsána v podmnožině jazyka [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) nazvané [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki), kterou lze zkompilovat do jazyka WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript je přísnější než běžný TypeScript, přesto poskytuje známou syntaxi. - -Pro každou obsluhu události definovanou v souboru `subgraph.yaml` v části `mapping.eventHandlers` vytvořte exportovanou funkci stejného jména. Každá obslužná funkce musí přijímat jeden parametr nazvaný `event` s typem odpovídajícím názvu události, která je obsluhována.
- -V příkladovém podgrafu `src/mapping.ts` obsahuje obsluhy událostí `NewGravatar` a `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -První obslužný program přijme událost `NewGravatar` a vytvoří novou entitu `Gravatar` s `new Gravatar(event.params.id)`, přičemž pole entity vyplní pomocí odpovídajících parametrů události. Tato instance entity je reprezentována proměnnou `gravatar` s hodnotou id `event.params.id`. - -Druhá obslužná rutina se pokusí načíst existující `Gravatar` z úložiště Graf Uzel. Pokud ještě neexistuje, je vytvořen na vyžádání. Entita je poté aktualizována tak, aby odpovídala novým parametrům události, a poté je uložena zpět do úložiště pomocí `gravatar.save()`. - -### Doporučené IDa pro vytváření nových Entity - -Důrazně se doporučuje používat typ `Bytes` pro pole `id` a typ `String` používat pouze pro atributy, které skutečně obsahují lidsky čitelný text, například název tokenu. Níže jsou uvedeny některé doporučené hodnoty `id`, které je třeba zvážit při vytváření nových entit. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- Pro entity, které ukládají agregovaná data, např.
denní objemy obchodů, obvykle obsahuje `id` číslo dne, Zde je výhodné použít `Bytes` jako `id`. Určení `id` by vypadalo takto - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Převod konstantních adres na `Bajty`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Zpracování entit se stejnými ID - -Pokud při vytváření a ukládání nové entity již existuje entita se stejným ID, jsou při slučování vždy upřednostněny vlastnosti nové entity. To znamená, že existující entita bude aktualizována hodnotami z nové entity. - -Pokud je pro pole v nové entitě se stejným ID záměrně nastavena nulová hodnota, bude stávající entita aktualizována s nulovou hodnotou. - -Pokud není pro pole v nové entitě se stejným ID nastavena žádná hodnota, bude pole rovněž nulové. - -## Generování kódu - -Aby byla práce s inteligentními smlouvami, událostmi a entitami snadná a typově bezpečná, může Graf CLI generovat typy AssemblyScript ze schématu GraphQL podgrafu a ABI smluv obsažených ve zdrojích dat. - -To se provádí pomocí - -```sh -graph codegen [--output-dir ] [] -``` - -ale ve většině případů jsou podgrafy již předkonfigurovány prostřednictvím souboru `package.json`, takže pro dosažení téhož stačí spustit jeden z následujících příkazů: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Tím se vygeneruje třída AssemblyScript pro každou chytrou smlouvu v souborech ABI uvedených v `subgraph.yaml`, což vám umožní svázat tyto smlouvy s konkrétními adresami v mapování a volat metody smlouvy pouze pro čtení proti zpracovávanému bloku. 
Pro každou událost kontraktu také vygeneruje třídu, která umožní snadný přístup k parametrům události a také k bloku a transakci, ze které událost pochází. Všechny tyto typy se zapisují do souboru `//.ts`. V příkladovém podgrafu by to bylo `generated/Gravity/Gravity.ts`, což umožňuje mapování, kterým lze tyto typy importovat. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Kromě toho je pro každý typ entity ve schématu GraphQL podgrafu vygenerována jedna třída. Tyto třídy zajišťují typově bezpečné načítání entit, přístup k polím entit pro čtení a zápis a také metodu `save()` pro zápis entit do úložiště. Všechny třídy entit jsou zapsány do souboru `/schema.ts`, což umožňuje mapování importovat je pomocí funkce - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Poznámka:** Po každé změně schématu jazyka GraphQL nebo ABI obsažených v manifestu je nutné provést generování kódu znovu. Musí být také provedeno alespoň jednou před sestavením nebo nasazením podgrafu. - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Šablony zdrojů dat - -Běžným vzorem v inteligentních smlouvách kompatibilních s EVM je používání registrů nebo továrních smluv, kdy jedna smlouva vytváří, spravuje nebo odkazuje na libovolný počet dalších smluv, z nichž každá má svůj vlastní stav a události. - -Adresy těchto dílčích smluv mohou, ale nemusí být známy předem a mnoho z těchto smluv může být vytvořeno a/nebo přidáno v průběhu času. Proto v takových případech není možné definovat jediný zdroj dat nebo pevný počet zdrojů dat a je zapotřebí dynamičtější přístup: _šablony zdrojů dat_. 
- -### Zdroj dat pro hlavní smlouvu - -Nejprve definujete běžný zdroj dat pro hlavní smlouvu. Níže uvedený úryvek ukazuje zjednodušený příklad zdroje dat pro smlouvu [Uniswap](https://uniswap.org) exchange factory. Všimněte si obsluhy události `NewExchange(address,address)`. Ta je emitována, když je v řetězci vytvořena nová směnná smlouva tovární smlouvou. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Šablony zdrojů dat pro dynamicky vytvářené smlouvy - -Poté do manifestu přidáte _šablony datových zdrojů_. Ty jsou totožné s běžnými zdroji dat, pouze postrádají předdefinovanou adresu smlouvy v položce `zdroj`. Obvykle byste definovali jednu šablonu pro každý typ dílčí smlouvy spravované nebo odkazované nadřazenou smlouvou. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... 
-templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instancování šablony zdroje dat - -V posledním kroku aktualizujete mapování hlavní smlouvy a vytvoříte dynamickou instanci zdroje dat z jedné ze šablon. V tomto příkladu byste změnili mapování hlavní smlouvy tak, abyste importovali šablonu `Exchange` a zavolali na ní metodu `Exchange.create(address)`, abyste zahájili indexování nové smlouvy exchange. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Poznámka:** Nový zdroj dat bude zpracovávat pouze volání a události pro blok, ve kterém byl vytvořen, a všechny následující bloky, ale nebude zpracovávat historická data, tj. data obsažená v předchozích blocích. -> -> Pokud předchozí bloky obsahují data relevantní pro nový zdroj dat, je nejlepší tato data indexovat načtením aktuálního stavu smlouvy a vytvořením entit reprezentujících tento stav v době vytvoření nového zdroje dat. - -### Kontext zdroje dat - -Kontexty zdrojů dat umožňují předávat další konfiguraci při instanci šablony. V našem příkladu řekněme, že burzy jsou spojeny s konkrétním obchodním párem, který je obsažen v události `NewExchange`. 
Tuto informaci lze předat do instancovaného zdroje dat takto: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Uvnitř mapování šablony `Exchange` lze pak přistupovat ke kontextu: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Pro všechny typy hodnot existují setter a getter jako `setString` a `getString`. - -## Výchozí bloky - -`startBlock` je volitelné nastavení, které umožňuje určit, od kterého bloku v řetězci začne zdroj dat indexovat. Nastavení počátečního bloku umožňuje zdroji dat přeskočit potenciálně miliony bloků, které jsou irelevantní. Typicky vývojář podgrafu nastaví `startBlock` na blok, ve kterém byl vytvořen inteligentní kontrakt zdroje dat. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Poznámka:** Blok pro vytvoření smlouvy lze rychle vyhledat v Etherscan: -> -> 1. Vyhledejte smlouvu zadáním její adresy do vyhledávacího řádku. -> 2. Klikněte na hash transakce vytvoření v sekci `Contract Creator`. -> 3. Načtěte stránku s podrobnostmi o transakci, kde najdete počáteční blok pro danou smlouvu.
- -## Tipy indexátor - -Nastavení `indexerHints` v manifestu podgrafu poskytuje směrnice pro indexátory ohledně zpracování a správy podgrafu. Ovlivňuje provozní rozhodnutí v oblasti manipulace s daty, strategií indexace a optimalizací. V současné době obsahuje možnost `prune` pro správu uchovávání nebo odstraňování historických dat. - -> This feature is available from `specVersion: 1.0.0` - -### Prořezávat - -`indexerHints.prune`: Definuje zachování historických blokových dat pro podgraf. Mezi možnosti patří: - -1. `"never"`: Žádné ořezávání historických dat; zachovává celou historii. -2. `"auto"`: Zachovává minimální potřebnou historii nastavenou indexátorem, čímž optimalizuje výkon dotazu. -3. Konkrétní číslo: Nastaví vlastní limit počtu historických bloků, které se mají zachovat. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Použití `"auto"` se obecně doporučuje, protože maximalizuje výkon dotazu a je dostačující pro většinu uživatelů, kteří nevyžadují přístup k rozsáhlým historickým datům. - -U podgrafů využívajících [dotazy na cestování v čase](/querying/graphql-api/#time-travel-queries) je vhodné buď nastavit určitý počet bloků pro uchovávání historických dat, nebo použít `prune: never` pro uchování všech historických stavů entit.
Níže jsou uvedeny příklady, jak obě možnosti nakonfigurovat v nastavení podgrafu: - -Uchování určitého množství historických dat: - -``` - indexerHints: - prune: 1000 # Nahraďte 1000 požadovaným počtem bloků, které chcete zachovat -``` - -Zachování kompletní historie entitních států: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Zpracovatelé hovorů - -Události sice představují účinný způsob, jak shromažďovat relevantní změny stavu smlouvy, ale mnoho smluv se vyhýbá generování protokolů, aby se optimalizovaly náklady na plyn. V těchto případech se dílčí graf může přihlásit k odběru volání provedených na smlouvu se zdrojem dat. Toho lze dosáhnout definováním obsluhy volání odkazující na signaturu funkce a obsluhu mapování, která bude zpracovávat volání této funkce. Pro zpracování těchto volání obdrží mapovací obsluha jako argument `ethereum.Call` s typizovanými vstupy do volání a výstupy z volání. Volání uskutečněná v libovolné hloubce řetězce volání transakce spustí mapování, což umožní zachytit aktivitu se smlouvou zdroje dat prostřednictvím proxy smluv. - -Obsluhy volání se spustí pouze v jednom ze dvou případů: když je zadaná funkce volána jiným účtem než samotnou smlouvou nebo když je v Solidity označena jako externí a volána jako součást jiné funkce ve stejné smlouvě. - -> **Poznámka:** Zpracovatelé volání jsou v současné době závislí na API pro sledování parity. Některé sítě, například řetězec BNB a Arbitrum, toto API nepodporují. Pokud podgraf indexující některou z těchto sítí obsahuje jeden nebo více zpracovatelů volání, nezačne se synchronizovat. 
Vývojáři podgrafů by místo toho měli používat obsluhy událostí. Ty jsou mnohem výkonnější než obsluhy volání a jsou podporovány v každé síti evm. - -### Definice obsluhy volání - -Chcete-li v manifestu definovat obsluhu volání, jednoduše přidejte pole `callHandlers` pod zdroj dat, ke kterému se chcete přihlásit. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`funkce` je normalizovaná signatura funkce, podle které se filtrují volání. Vlastnost `handler` je název funkce ve vašem mapování, kterou chcete spustit při volání cílové funkce v kontraktu zdroje dat. - -### Funkce mapování - -Každá obslužná funkce volání přijímá jeden parametr, který má typ odpovídající názvu volané funkce. Ve výše uvedeném příkladu podgraf obsahuje mapování obslužnou rutinu pro případ, kdy je volána funkce `createGravatar` a jako argument přijímá parametr `CreateGravatarCall`: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -Funkce `handleCreateGravatar` přebírá novou `CreateGravatarCall`, což je podtřída `ethereum.Call`, kterou poskytuje `@graphprotocol/graph-ts`, která obsahuje typizované vstupy a výstupy volání. Typ `CreateGravatarCall` je pro vás vygenerován při spuštění `graph codegen`. 
- -## Obsluha bloků - -Kromě přihlášení k událostem smlouvy nebo volání funkcí může podgraf chtít aktualizovat svá data, když jsou do řetězce přidány nové bloky. Za tímto účelem může podgraf spustit funkci po každém bloku nebo po blocích, které odpovídají předem definovanému filtru. - -### Podporované filtry - -#### Filtr volání - -```yaml -filter: - kind: call -``` - -_Definovat obslužná rutina bude zavolána jednou pro každý blok, který obsahuje volání smlouvy (zdroje dat), pod kterou je rutina definovát._ - -> **Poznámka:** Filtr `call` v současné době závisí na API pro sledování parity. Některé sítě, například řetězec BNB a Arbitrum, toto API nepodporují. Pokud podgraf indexující jednu z těchto sítí obsahuje jeden nebo více blokových manipulátorů s filtrem `call`, nezačne se synchronizovat. - -Protože pro obsluhu bloku neexistuje žádný filtr, zajistí, že obsluha bude volána každý blok. Zdroj dat může obsahovat pouze jednu blokovou obsluhu pro každý typ filtru. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Filtr dotazování - -> **Vyžaduje `specVersion`> >= 0.0.8** - -> **Poznámka:** Filtry zdroj dat jsou k dispozici pouze pro zdroje dat `druhu: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Definovaný obslužná rutina bude zavolána jednou pro každých `n` bloků, kde `n` je hodnota uvedená v poli `every`. Tato konfigurace umožňuje dílčímu graf provádět specifické operace v pravidelných intervalech bloků. 
- -#### Jednou Filtr - -> **Vyžaduje `specVersion`> >= 0.0.8** - -> **Poznámka:** Jednou použité filtry jsou dostupné pouze pro zdroje dat `druhu: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Definovaný obslužná rutina s filtrem once bude zavolána pouze jednou před spuštěním všech ostatních rutin. Tato konfigurace umožňuje, aby podgraf používal obslužný program jako inicializační obslužný, který provádí specifické úlohy na začátku indexování. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Funkce mapování - -Funkce mapování obdrží jako jediný argument `ethereum.Block`. Stejně jako mapovací funkce pro události může tato funkce přistupovat k existujícím entitám subgrafu v úložišti, volat chytré kontrakty a vytvářet nebo aktualizovat entity. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymní události - -Pokud potřebujete v Solidity zpracovávat anonymní události, lze toho dosáhnout zadáním tématu 0 události, jak je uvedeno v příkladu: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Událost se spustí pouze tehdy, když se shoduje signatura i téma 0. Ve výchozím nastavení se `téma0` rovná hash signatury události. - -## Potvrzení transakcí v obslužných rutinách událostí - -Počínaje `specVersion` `0.0.5` a `apiVersion` `0.0.7` mohou mít obsluhy událostí přístup k potvrzení transakce, která je vyvolala. - -Za tímto účelem musí být obsluhy událostí deklarovány v manifestu podgrafů pomocí nového klíče `receipt: true`, který je nepovinný a výchozí hodnota je není pravda. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Uvnitř obslužné funkce je příjem přístupný v poli `Event.receipt`. Pokud je klíč `receipt` nastaven na `false` nebo je v manifestu vynechán, bude místo něj vrácena hodnota `null`. - -## Experimentální funkce - -Počínaje `specVersion` `0.0.4` musí být funkce podgrafů explicitně deklarovány v sekci `features` na nejvyšší úrovni souboru manifestu s použitím jejich názvu `camelCase`, jak je uvedeno v následující tabulce: - -| Vlastnosti | Název | -| ----------------------------------------------------------- | ---------------- | -| [Nefatální](#non-fatal-errors) | `nonFatalErrors` | -| [Fulltextové vyhledávání](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Štěpování](#grafting-onto-existing-subgraphs) | `štěpování` | - -Pokud například dílčí graf používá funkce **Plnotextové vyhledávání** a **Nefatální chyby**, pole `Vlastnosti` v manifestu by mělo být: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Všimněte si, že použití funkce bez její deklarace způsobí při nasazení podgraf chybu **validace**, ale pokud je funkce deklarována, ale není použita, k žádné chybě nedojde. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! 
- timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Nefatální - -Chyby indexování v již synchronizovaných podgrafech ve výchozím nastavení způsobí selhání podgrafy a zastavení synchronizace. 
Podgrafy lze alternativně nakonfigurovat tak, aby pokračovaly v synchronizaci i při přítomnosti chyb, a to ignorováním změn provedených obslužnou rutinou, která chybu vyvolala. To dává autorům podgrafů čas na opravu jejich podgrafů, zatímco dotazy jsou nadále obsluhovány proti poslednímu bloku, ačkoli výsledky mohou být nekonzistentní kvůli chybě, která chybu způsobila. Všimněte si, že některé chyby jsou stále fatální. Aby chyba nebyla fatální, musí být známo, že je deterministická. - -> **Poznámka:** Síť grafů zatím nepodporuje nefatální chyby a vývojáři by neměli do sítě nasazovat podgrafy využívající tuto funkci prostřednictvím Studio. - -Povolení nefatálních chyb vyžaduje nastavení následujícího příznaku funkce v manifestu podgraf: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -Dotaz se také musí přihlásit k dotazování na data s potenciálními nekonzistencemi prostřednictvím argumentu `subgraphError`. Doporučuje se také dotazovat se pomocí `_meta`, aby bylo možné zkontrolovat, zda podgraf nepřeskočil chyby, jako v příkladu: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Pokud podgraf narazí na chybu, vrátí tento dotaz jak data, tak chybu graphql se zprávou `"indexing_error"`, jako v tomto příkladu odpovědi: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Roubování na existující podgrafy - -> **Poznámka:** při počátečním upgrade na Síť graf se nedoporučuje používat roubování. Více informací se dozvíte [zde](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -Při prvním nasazení podgrafu se události začnou indexovat v bloku geneze příslušného řetězce (nebo v bloku `startBlock` definovaném u každého zdroje dat). 
Za určitých okolností je výhodné znovu použít data z existujícího podgrafu a začít indexovat v mnohem pozdějším bloku. Tento způsob indexování se nazývá _roubování_. roubování je užitečné například během vývoje, abyste se rychle dostali přes jednoduché chyby v mapování nebo abyste dočasně znovu zprovoznili existující podgraf poté, co selhal. - -Podgraf je naroubován na základní podgraf, pokud manifest podgrafu v souboru `subgraph.yaml` obsahuje blok `graft` na nejvyšší úrovni: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Když je nasazen podgraf, jehož manifest obsahuje blok `graft`, Graf Uzel zkopíruje data `základního` podgrafu až do daného `bloku` včetně a poté pokračujte v indexování nového podgrafu od tohoto bloku dále. Základní podgraf musí existovat v cílové instanci Graph Node a musí být indexován alespoň do daného bloku. Kvůli tomuto omezení by se roubování mělo používat pouze během vývoje nebo během nouzového stavu, aby se urychlila tvorba ekvivalentního neroubovaného podgrafu. - -Protože se při roubování základní data spíše kopírují než indexují, je mnohem rychlejší dostat podgraf do požadovaného bloku než při indexování od nuly, i když počáteční kopírování dat může u velmi velkých podgrafů trvat i několik hodin. Během inicializace roubovaného podgrafu bude uzel Graf Uzel zaznamenávat informace o typů entit, které již byly zkopírovány. - -Roubované podgraf může používat schéma GraphQL, které není totožné se schématem základního podgrafu, ale je s ním pouze kompatibilní. 
Musí to být platné schéma podgrafu jako takové, ale může se od schématu základního podgrafu odchýlit následujícími způsoby: - -- Přidává nebo odebírá typy entit -- Odstraňuje atributy z typů entit -- Přidává nulovatelné atributy k typům entit -- Mění nenulovatelné atributy na nulovatelné atributy -- Přidává hodnoty de enums -- Přidává nebo odebírá rozhraní -- Mění se, pro které typy entit je rozhraní implementováno - -> **[Feature Management](#experimental-features):** `grafting` musí být deklarováno v `features` v manifestu podgrafů. - -## IPFS/Arweave File Data Sources - -Zdroje dat souborů jsou novou funkcí podgrafu pro přístup k datům mimo řetězec během indexování robustním a rozšiřitelným způsobem. Zdroje souborových dat podporují načítání souborů ze systému IPFS a z Arweave. - -> To také vytváří základ pro deterministické indexování dat mimo řetězec a potenciální zavedení libovolných dat ze zdrojů HTTP. - -### Přehled - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> Nahrazuje stávající API `ipfs.cat` - -### Průvodce upgradem - -#### Aktualizace `graph-ts` a `graph-cli` - -Souborové zdroje dat vyžadují graph-ts >=0.29.0 a graph-cli >=0.33.1 - -#### Přidání nového typu entity, který bude aktualizován při nalezení souborů - -Zdroje dat souborů nemohou přistupovat k entitám založeným na řetězci ani je aktualizovat, ale musí aktualizovat entity specifické pro soubor. - -To může znamenat rozdělení polí ze stávajících entit do samostatných entit, které budou vzájemně propojeny. 
- -Původní kombinovaný entita: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Nové, rozdělená entit: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Pokud je vztah mezi nadřazenou entitou a entitou výsledného zdroje dat souboru 1:1, je nejjednodušším vzorem propojení nadřazené entity s entitou výsledného souboru pomocí CID IPFS jako vyhledávacího prvku. Pokud máte potíže s modelováním nových entit založených na souborech, ozvěte se na Discord! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Přidání nového šablony zdroje dat s `druhem: file/ipfs` nebo `druhem: file/arweave` - -Jedná se o zdroj dat, který bude vytvořen při identifikaci souboru zájmu. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> V současné době jsou vyžadovány `abis`, ačkoli není možné volat smlouvy ze zdrojů dat souborů - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Vytvoření nové obslužné pro zpracování souborů - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. 
This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -CID souboru jako čitelný řetězec lze získat prostřednictvím `dataSource` následujícím způsobem: - -```typescript -const cid = dataSource.stringParam() -``` - -Příklad - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn zdrojů dat souborů v případě potřeby - -Nyní můžete vytvářet zdroje dat souborů během provádění obslužných založených na řetězci: - -- Import šablony z automaticky generovaných `šablon` -- volání `TemplateName.create(cid: string)` z mapování, kde cid je platný identifikátor obsahu pro IPFS nebo Arweave - -Pro systém IPFS podporuje Graf Uzel identifikátory obsahu [v0 a v1](https://docs.ipfs.tech/concepts/content-addressing/) a identifikátory obsahu s adresáři (např. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Příklad: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Tím se vytvoří nový zdroj dat souborů, který bude dotazovat nakonfigurovaný koncový bod IPFS nebo Arweave grafického uzlu a v případě nenalezení se pokusí o opakování. Když je soubor nalezen, spustí se obslužná zdroje dat souboru. - -Tento příklad používá CID jako vyhledávání mezi nadřazenou entitou `Token` a výslednou entitou `TokenMetadata`. - -> Dříve by vývojář podgrafu zavolal `ipfs.cat(CID)` a načetl by soubor - -Gratulujeme, používáte souborové zdroje dat! - -#### Nasazení podgrafů - -Nyní můžete `sestavit` a `rozšířit` svůj podgraf do libovolného uzlu Graf >=v0.30.0-rc.0. - -#### Omezení - -Zpracovatelé a entity zdrojů dat souborů jsou izolovány od ostatních entit podgrafů, což zajišťuje, že jsou při provádění deterministické a nedochází ke kontaminaci zdrojů dat založených na řetězci. 
Přesněji řečeno: - -- Entity vytvořené souborovými zdroji dat jsou neměnné a nelze je aktualizovat -- Obsluhy zdrojů dat souborů nemohou přistupovat k entita z jiných zdrojů dat souborů -- K entita přidruženým k datovým zdrojům souborů nelze přistupovat pomocí zpracovatelů založených na řetězci - -> Ačkoli by toto omezení nemělo být pro většinu případů použití problematické, pro některé může představovat složitost. Pokud máte problémy s modelováním dat založených na souborech v podgrafu, kontaktujte nás prosím prostřednictvím služby Discord! - -Kromě toho není možné vytvářet zdroje dat ze zdroje dat souborů, ať už se jedná o zdroj dat v řetězci nebo jiný zdroj dat souborů. Toto omezení může být v budoucnu zrušeno. - -#### Osvědčené postupy - -Pokud propojovat metadata NFT s odpovídajícími tokeny, použijte hash IPFS metadat k odkazu na entita Metadata z entity Token. Uložte entitu Metadata s použitím hashe IPFS jako ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -Pokud máte entity, které se obnovují vícekrát, vytvořte jedinečné entity založené na souborech pomocí hash & IPFS; ID entity a odkazujte na ně pomocí odvozeného pole v entitě založené na řetězci. - -> Pracujeme na zlepšení výše uvedeného doporučení, aby dotazy vracely pouze "nejnovější" verzi - -#### Známé problémy - -Souborové zdroje dat v současné době vyžadují ABI, i když se ABI nepoužívá ([problém](https://github.com/graphprotocol/graph-cli/issues/961)). Řešením je přidání libovolného ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. 
- -#### Příklady - -[Migrace podgrafů Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Odkazy: - -[Zdroje dat souborů GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/cs/developing/creating-a-subgraph/_meta.js b/website/pages/cs/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/cs/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/developing/graph-ts/_meta.js b/website/pages/cs/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/cs/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/cs/managing/deprecate-a-subgraph.mdx b/website/pages/cs/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/cs/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. 
-- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/cs/mips-faqs.mdx b/website/pages/cs/mips-faqs.mdx deleted file mode 100644 index f826d4fdc367..000000000000 --- a/website/pages/cs/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Úvod - -> Poznámka: program MIPs je od května 2023 uzavřen. Děkujeme všem indexátorům, kteří se programu zúčastnili! - -Účast v ekosystému Grafu je vzrušující! Během [Dne Grafu 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal oznámil [ukončení hostované služby](https://thegraph.com/blog/sunsetting-hosted-service/), což je okamžik, na kterém ekosystém Graf pracoval mnoho let. - -Nadace The Graph Foundation vyhlásila program [Migration Infrastructure Providers (MIPs)](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program), který má podpořit ukončení hostované služby a migraci všech jejích aktivit do decentralizované sítě. - -Program MIPs je motivační program pro indexátory, který je podporuje zdroji pro indexování řetězců mimo mainnet Ethereum a pomáhá protokolu The Graph rozšířit decentralizovanou síť na infrastrukturní vrstvu s více řetězci. - -Program MIPs vyčlenil 0.75% zásoby GRT (75M GRT), přičemž 0.5% je určeno na odměnu indexátorům, kteří přispívají k zavádění sítě, a 0.25% na síťové granty pro vývojáře podgrafů využívajících víceřetězcové podgrafy. 
- -### Užitečné zdroje - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [Jak se stát efektivním indexátorem v síti Graf](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Optimalizátor přidělování](https://github.com/graphprotocol/allocationopt.jl) -- [Nástroje pro optimalizaci přidělování](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Je možné vygenerovat platný důkaz indexace (POI), i když podgraf selhal? - -Ano, je to tak. - -Pro představu, charta rozhodčího řízení [více informací o chartě naleznete zde](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) specifikuje metodiku generování POI pro neúspěšný podgraf. - -Člen komunity [SunTzu](https://github.com/suntzu93) vytvořil skript, který tento proces automatizuje v souladu s metodikou charty rozhodčího řízení. Podívejte se na repo [zde](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Který řetězec bude program MIPs motivovat jako první? - -Prvním řetězcem, který bude v decentralizované síti podporován, je Gnosis Chain! Gnosis Chain, dříve známý jako xDAI, je řetězec založený na EVM. Gnosis Chain byl vybrán jako první vzhledem k uživatelské přívětivosti běžících uzlů, připravenosti Indexeru, souladu s Graf a přijetí v rámci web3. - -### 3. Jak budou do programu MIP přidávány nové řetězce? - -Nové řetězce budou vyhlašovány v průběhu programu MIPs na základě připravenosti indexátorů, poptávky a nálady komunity. Řetězce budou nejprve podporovány v testnetu a následně bude schválen GIP, který bude daný řetězec podporovat v mainnetu. Indexátoři účastnící se programu MIPs si vyberou, které řetězce mají zájem podporovat, a budou získávat odměny za každý řetězec, kromě toho budou získávat poplatky za dotazy a odměny za indexování v síti za obsluhu podgrafů. 
Účastníci programu MIPs budou hodnoceni na základě svého výkonu, schopnosti sloužit potřebám sítě a podpory komunity. - -### 4. Jak poznáme, že je síť připravena na nový řetězec? - -Nadace Graf bude sledovat výkonnostní metriky QoS, výkonnost sítě a komunitní kanály, aby mohla co nejlépe posoudit připravenost. Prioritou je zajistit, aby síť splňovala výkonnostní potřeby těch víceřetězcových dapů, které budou moci migrovat své podgrafy. - -### 5. Jak jsou odměny rozděleny do jednotlivých řetězců? - -Vzhledem k tomu, že se řetězce liší svými požadavky na sync uzly a liší se i objemem dotazů a jejich přijetím, bude o odměnách pro každý řetězec rozhodnuto na konci cyklu daného řetězce, aby se zajistilo, že budou zachyceny všechny zpětné vazby a poznatky. V každém okamžiku však budou moci indexátoři získávat také odměny za dotazy a indexaci, jakmile bude řetězec v síti podporován. - -### 6. Musíme indexovat všechny řetězce v programu MIPs, nebo můžeme vybrat jen jeden řetězec a indexovat ho? - -Můžete indexovat libovolný řetězec! Cílem programu MIPs je vybavit indexátory nástroji a znalostmi, aby mohli indexovat řetězce, které si přejí, a podporovat ekosystémy web3, o které mají zájem. Pro každý řetězec však existují fáze od testnet po mainnet. Ujistěte se, že jste absolvovali všechny fáze pro řetězce, které indexujete. Více informací o fázích naleznete na stránce [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059). - -### 7. Kdy budou odměny rozděleny? - -Odměny MIP budou rozděleny na řetězce, jakmile budou splněny výkonnostní metriky a migrované podgrafy budou těmito indexéry podporovány. Informace o celkových odměnách za řetězec najdete v polovině cyklu daného řetězce. - -### jak funguje bodování? - -Indexátoři budou soutěžit o odměny na základě bodového hodnocení v průběhu celého programu na žebříčku. 
Bodování programu bude založeno na: - -**pokrytí podgraf** - -- Poskytujete maximální podporu podgrafů na řetězec? - -- Během MIP se od velkých indexátorů očekává, že budou sázet více než 50% podgrafů na řetězec, který podporují. - -**Kvalita služeb** - -- Obsluhuje indexer řetězec s dobrou kvalitou služby (latence, čerstvá data, provozuschopnost atd.)? - -- Podporuje Indexer vývojáře dapp a reaguje na jejich potřeby? - -Přiděluje indexer efektivně a přispívá k celkovému stavu sítě? - -**Podpora komunity** - -- Spolupracuje Indexer s ostatními Indexery, aby jim pomohl s nastavením pro multi-chain? - -- Poskytuje Indexer v průběhu programu zpětnou vazbu hlavním vývojářům nebo sdílí informace s Indexery na fórum? - -### 9. Jak bude přidělena role Discord? - -Moderátoři přidělí role v následujících dnech. - -### 10. Je v pořádku spustit program v testovací síti a poté přepnout na Mainnet? Budete schopni identifikovat můj uzel a zohlednit ho při rozdělování odměn? - -Ano, ve skutečnosti se to od vás očekává. Několik fází je na Görli a jedna je na mainnetu. - -### 11. V jakém okamžiku očekáváte, že účastníci přidají nasazení hlavní sítě? - -Ve fázi 3 bude vyžadován indexátor hlavní sítě. Více informací o tom bude [brzy sdíleno na této stránce s pojmy] (https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### Budou odměny podléhat vesting lhůtě? - -Procento, které má být rozděleno na konci programu, bude podléhat nároku. Více informací o této problematice bude uvedeno ve smlouvě s indexátorem. - -### 13. Budou mít všichni členové týmů s více než jedním členem role MIPs Discord? - -Ano - -### 14. Je možné použít uzamčené tokeny z programu Kurátor grafů k účasti v testnetu MIPs? - -An o - -### 15. Bude během programu MIPs existovat lhůta pro zpochybnění neplatných POI? - -Bude rozhodnuto. 
Vracejte se prosím pravidelně na tuto stránku pro další podrobnosti, nebo pokud je váš požadavek naléhavý, napište nám na info@thegraph.foundation - -### 17. Lze sloučit dvě smlouvy o vesting tokenů? - -Možnosti jsou následující: můžete delegovat jeden na druhý nebo spustit dva samostatné indexery. - -### 18. Otázky KYC? - -Prosím email info@thegraph.foundation - -### 19. Ještě nejsem připraven indexovat řetězec Gnosis, mohu naskočit a začít indexovat z jiného řetězce, až budu připraven? - -Ano - -### 20. Existují doporučené oblasti pro provoz serverů? - -Neposkytujeme doporučení týkající se regionů. Při výběru lokalit byste se mohli zamyslet nad tím, kde jsou hlavní trhy s kryptoměna. - -### 21. Co jsou to "náklady na plyn pro zpracování"? - -Jedná se o deterministickou míru nákladů na provedení obslužné rutiny. Na rozdíl od toho, co by mohl název napovídat, nesouvisí s náklady na plyn v blockchain. diff --git a/website/pages/cs/querying/_meta.js b/website/pages/cs/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/cs/querying/_meta.js +++ b/website/pages/cs/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/cs/querying/graph-client/_meta.js b/website/pages/cs/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/cs/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/de/_meta.js b/website/pages/de/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/de/_meta.js +++ b/website/pages/de/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/de/deploying/deploying-a-subgraph-to-hosted.mdx 
b/website/pages/de/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/de/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. 
- -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. 
Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. 
- -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. - -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. 
To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/de/deploying/hosted-service.mdx b/website/pages/de/deploying/hosted-service.mdx deleted file mode 100644 index 1ea86b96a573..000000000000 --- a/website/pages/de/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. 
Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Create a Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. 
The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/de/deploying/subgraph-studio.mdx b/website/pages/de/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/de/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. 
- -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. 
Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/de/developing/creating-a-subgraph.mdx b/website/pages/de/developing/creating-a-subgraph.mdx deleted file mode 100644 index 2c0070d0560c..000000000000 --- a/website/pages/de/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. - -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. 
`mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. 
- -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
<address> [<subgraph-path>] - -Options: - - --abi <path> Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file <path> Networks config file path (default: "./networks.json") -```
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every EVM network.
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block.
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Beispiele - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/de/developing/creating-a-subgraph/_meta.js b/website/pages/de/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/de/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/de/developing/graph-ts/_meta.js b/website/pages/de/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/de/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/de/managing/deprecate-a-subgraph.mdx b/website/pages/de/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/de/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer.
-
-**Please note the following:**
-
-- The owner's wallet should call the `deprecateSubgraph` function.
-- Curators will not be able to signal on the subgraph anymore.
-- Curators that already signaled on the subgraph can withdraw their signal at an average share price.
-- Deprecated subgraphs will show an error message.
-
-> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively.
diff --git a/website/pages/de/mips-faqs.mdx b/website/pages/de/mips-faqs.mdx
deleted file mode 100644
index ae460989f96e..000000000000
--- a/website/pages/de/mips-faqs.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: MIPs FAQs
----
-
-## Introduction
-
-> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated!
-
-It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years.
-
-To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program).
-
-The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer.
-
-The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs.
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
-
-- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support.
-
-**Quality Of Service**
-
-- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)?
-
-- Is the Indexer supporting dapp developers being reactive to their needs?
-
-- Is Indexer allocating efficiently, contributing to the overall health of the network?
-
-**Community Support**
-
-- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain?
-
-- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum?
-
-### 9. How will the Discord role be assigned?
-
-Moderators will assign the roles in the next few days.
-
-### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards?
-
-Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet.
-
-### 11. At what point do you expect participants to add a mainnet deployment?
-
-There will be a requirement to have a mainnet indexer during phase 3. More information on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059)
-
-### 12. Will rewards be subject to vesting?
-
-The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement.
-
-### 13. For teams with more than one member, will all the team members be given a MIPs Discord role?
-
-Yes
-
-### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet?
-
-Yes
-
-### 15. During the MIPs program, will there be a period to dispute invalid POI?
-
-To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation
-
-### 17.
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/de/querying/_meta.js b/website/pages/de/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/de/querying/_meta.js +++ b/website/pages/de/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/de/querying/graph-client/_meta.js b/website/pages/de/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/de/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/en/network/_meta.js b/website/pages/en/network/_meta.js index 555f264df379..db9450bd024d 100644 --- a/website/pages/en/network/_meta.js +++ b/website/pages/en/network/_meta.js @@ -6,4 +6,5 @@ export default { curating: '', developing: '', explorer: '', + contracts: '', } diff --git a/website/pages/en/publishing/_meta.js b/website/pages/en/publishing/_meta.js index b1c6ea436a54..956339c6b49e 100644 --- a/website/pages/en/publishing/_meta.js +++ b/website/pages/en/publishing/_meta.js @@ -1 +1,3 @@ -export 
default {} +export default { + 'publishing-a-subgraph': '', +} diff --git a/website/pages/en/release-notes/_meta.js b/website/pages/en/release-notes/_meta.js index b1c6ea436a54..541e5b7f6ea2 100644 --- a/website/pages/en/release-notes/_meta.js +++ b/website/pages/en/release-notes/_meta.js @@ -1 +1,4 @@ -export default {} +export default { + 'assemblyscript-migration-guide': '', + 'graphql-validations-migration-guide': '', +} diff --git a/website/pages/es/_meta.js b/website/pages/es/_meta.js index 891ed50b6162..f2f3b56163a5 100644 --- a/website/pages/es/_meta.js +++ b/website/pages/es/_meta.js @@ -1,22 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - network: 'The Graph Network', - '###1': { - type: 'heading', - title: 'Subgrafos', - }, - developing: 'Desarrollando', - deploying: 'Deployando', - publishing: 'Publicando', - managing: 'Administrando', - querying: 'Consultando', - cookbook: 'Recetario', - 'release-notes': 'Notas de Publicación y Guías de Actualización', - '###3': { - type: 'heading', - title: 'Indexación', - }, - tokenomics: 'Tokenomics', + ...meta, } diff --git a/website/pages/es/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/es/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index e1891bcbd9bf..000000000000 --- a/website/pages/es/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Despliegue de un subgrafo en el Servicio Alojado ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). 
- -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Guardar el Token de Acceso - -Luego de crear la cuenta, navega a tu [dashboard](https://thegraph.com/hosted-service/dashboard). Copia el token de acceso que aparece en el dashboard y ejecuta `graph auth --product hosted-service `. Esto almacenará el token de acceso en tu computadora. Sólo tienes que hacerlo una vez, o si alguna vez regeneras el token de acceso. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Selecciona una imagen que se utilizará como imagen de vista previa y miniatura para el subgrafo. - -**Subgraph Name** -Junto con el nombre de la cuenta con la que se crea el subgrafo, esto también definirá el nombre de estilo `account-name/subgraph-name` utilizado para los deploys y los endpoints de GraphQL. _Este campo no puede ser cambiado posteriormente._ - -**Account** - La cuenta con la que se crea el subgrafo. Puede ser la cuenta de un individuo o de una organización. _Los Subgrafos no pueden ser movidos entre cuentas posteriormente._ - -**Subtitle** - Texto que aparecerá en las tarjetas del subgrafo. - -**Description** - Descripción del subgrafo, visible en la página de detalles del subgrafo. - -**GitHub URL** Enlace al repositorio de subgrafos en GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. 
- -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -El subgrafo lo deployas ejecutando `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -El estado del subgrafo cambia a `Synced` una vez que el Graph Node ha extraído todos los datos de los bloques históricos. El Graph Node continuará inspeccionando los bloques para tu subgrafo a medida que estos bloques sean minados. - -## Re-Deploy de un Subgrafo - -Cuando hagas cambios en la definición de tu subgrafo, por ejemplo, para arreglar un problema en los mapeos de entidades, ejecuta el comando `yarn deploy` de arriba de nuevo para deployar la versión actualizada de tu subgrafo. Cualquier actualización de un subgrafo requiere que Graph Node reindexe todo tu subgrafo, de nuevo empezando por el bloque génesis. - -Si tu subgrafo previamente deployado está todavía en estado `Syncing`, será inmediatamente reemplazado por la nueva versión deployada. Si el subgrafo previamente deployado ya está completamente sincronizado, Graph Node marcará la nueva versión deployada como `Pending Version`, la sincronizará en segundo plano, y sólo reemplazará la versión actualmente deployada por la nueva una vez que la sincronización de la nueva versión haya terminado. 
Esto asegura que tienes un subgrafo con el que trabajar mientras la nueva versión se sincroniza. - -## Desplegando el subgráfo en múltiples redes - -En algunos casos, querrás desplegar el mismo subgrafo en múltiples redes sin duplicar todo su código. El principal reto que conlleva esto es que las direcciones de los contratos en estas redes son diferentes. - -### Usando graph-cli - -Tanto `graph build` (desde `v0.29.0`) como `graph deploy` (desde `v0.32.0`) aceptan dos nuevas opciones: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -Puedes usar la opción `--network` para especificar una configuración de red desde un archivo estándar `json` (el valor predeterminado es `networks.json`) para actualizar fácilmente tu subgrafo durante el desarrollo. - -**Nota:** El comando `init` ahora generará automáticamente un `networks.json` basado en la información proporcionada. Luego podrás actualizar redes existentes o agregar redes adicionales. - -Si no tienes un archivo `networks.json`, deberás crear uno manualmente con la siguiente estructura: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Nota:** No tienes que especificar ninguna de las `templates` (si tienes alguna) en el archivo de configuración, solo el ` dataSources`. Si hay alguna `templates` declarada en el archivo `subgraph.yaml`, su red se actualizará automáticamente a la especificada con la opción `--network`. 
- -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Este es el aspecto que debe tener el archivo de configuración de tu red: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Ahora podemos ejecutar uno de los siguientes comandos: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Ahora está listo para `yarn deploy`. - -**Nota:** como se mencionó anteriormente, desde `graph-cli 0.32.0` puedes ejecutar directamente `yarn deploy` con la opción`--network`: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Usando la plantilla subgraph.yaml - -Una solución para las versiones antiguas de graph-cli que permite parametrizar aspectos como las direcciones de los contratos es generar partes del mismo mediante un sistema de plantillas como [Mustache](https://mustache.github.io/) o [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -y - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Junto con eso, sustituirías el nombre de la red y las direcciones en el manifiesto con marcadores de posición variables `{{network}}` y `{{address}}` y cambiarías el nombre del manifiesto a, por ejemplo `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Para generar un manifiesto para cualquiera de las redes, puedes agregar dos comandos adicionales a `package.json` junto con una dependencia de `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Un ejemplo práctico de esto se puede encontrar [aqui](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Nota:** Este enfoque también se puede aplicar a situaciones más complejas, donde es necesario sustituir más que direcciones de contrato y nombres de red o donde también se generan asignaciones o ABIs a partir de plantillas. - -## Comprobando la salud del subgrafo - -Si un subgrafo se sincroniza con éxito, es una buena señal de que seguirá funcionando bien para siempre. 
Sin embargo, los nuevos activadores en la red pueden hacer que tu subgrafo alcance una condición de error no probada o puede comenzar a retrasarse debido a problemas de rendimiento o problemas con los operadores de nodos. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Esto te dará el `chainHeadBlock` que puedes comparar con el `latestBlock` en tu subgrafo para verificar si se está quedando atrás. `synced` informa si el subgrafo alguna vez se ha puesto al día con la cadena. `health` actualmente puede tomar los valores de `healthy` si no se produjeron errores, o `failed` si hubo un error que detuvo el progreso del subgrafo. En este caso, puedes consultar el campo `fatalError` para obtener detalles sobre este error. - -## Política de archivo de subgrafos en el servicio alojado - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. 
- -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Política de archivo de subgrafos en Subgraph Studio - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Cada subgrafo afectado por esta política tiene una opción para recuperar la versión en cuestión. diff --git a/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index c3d46b102d45..000000000000 --- a/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Implementación de un Subgrafo en Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Instala The Graph CLI (con yarn o npm) -- Crea tu Subgrafo en Subgraph Studio -- Autentica tu cuenta desde la CLI -- Implementación de un Subgrafo en Subgraph Studio - -## Instalación de la CLI de Graph - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. 
- -**Instalar con yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalar con npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Crea tu Subgrafo en Subgraph Studio - -Antes de deployar tu subgrafo real necesitas crear un subgrafo en [Subgraph Studio](https://thegraph.com/studio/). Te recomendamos que leas nuestra [Studio documentation](/deploying/subgraph-studio) para aprender más sobre esto. - -## Inicializa tu Subgrafo - -Una vez que se haya creado tu subgrafo en Subgraph Studio, puedes inicializar el código del subgrafo usando este comando: - -```bash -graph init --studio -``` - -El valor `` se puede encontrar en la página de detalles de tu subgrafo en Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -Después de ejecutar `graph init`, se te pedirá que introduzcas la dirección del contrato, la red y la ABI que quieres consultar. Al hacer esto se generará una nueva carpeta en tu máquina local con algo de código básico para empezar a trabajar en tu subgrafo. Luego puedes finalizar tu subgrafo para asegurarte de que funciona como se espera. - -## Graph Auth - -Antes de poder deployar tu subgrafo en Subgraph Studio, necesitas entrar en tu cuenta dentro del CLI. Para ello, necesitarás tu clave de despliegue que puedes encontrar en tu página "My Subgraphs" o en la página de detalles de tu subgrafo. - -Este es el comando que debes usar para autenticarse desde la CLI: - -```bash -graph auth --studio -``` - -## Implementación de un Subgrafo en Subgraph Studio - -Una vez que estés listo, puedes implementar tu subgrafo en Subgraph Studio. Hacer esto no publicará tu subgrafo en la red descentralizada, solo lo implementará en tu cuenta de Studio, donde podrás probarlo y actualizar los metadatos. - -Este es el comando CLI que debes usar para implementar tu subgrafo. 
- -```bash -graph deploy --studio -``` - -Después de ejecutar este comando, el CLI pedirá una etiqueta de versión, puedes nombrarla como quieras, puedes usar etiquetas como `0.1` y `0.2` o usar letras también como `uniswap-v2-0.1`. Estas etiquetas serán visibles en el Graph Explorer y pueden ser utilizadas por los curadores para decidir si quieren señalar esta versión o no, así que elígelas sabiamente. - -Una vez implementado, puedes probar tu subgrafo en Subgraph Studio usando el playground, implementar otra versión si es necesario, actualizar los metadatos y, cuando estés listo, publicar tu subgrafo en Graph Explorer. diff --git a/website/pages/es/deploying/hosted-service.mdx b/website/pages/es/deploying/hosted-service.mdx deleted file mode 100644 index 3d5d34dc3995..000000000000 --- a/website/pages/es/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: '¿Qué es el Servicio Alojado?' ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -Para una lista completa, consulta [Redes Soportadas](/developing/supported-networks/#hosted-service). - -## Crear un Subgrafo - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. 
Create a subgraph by passing in `graph init --product hosted-service`
-
-### Desde un Contrato Existente
-
-If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service.
-
-You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer.
-
-```sh
-graph init \
-  --product hosted-service
-  --from-contract <CONTRACT_ADDRESS> \
-  <GITHUB_USER>/<SUBGRAPH_NAME> [<DIRECTORY>]
-```
-
-Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form.
-
-```sh
---network <ETHEREUM_NETWORK> \
---abi <FILE> \
-```
-
-El `<GITHUB_USER>` en este caso es tu nombre de usuario u organización de GitHub, `<SUBGRAPH_NAME>` es el nombre de tu subgrafo y `<DIRECTORY>` es el nombre opcional del directorio donde `graph init` colocará el manifiesto del subgrafo de ejemplo. El `<CONTRACT_ADDRESS>` es la dirección de tu contrato existente. `<ETHEREUM_NETWORK>` es el nombre de la red Ethereum en la que vive el contrato. `<FILE>` es una ruta local a un archivo ABI de contrato. **Tanto `--network` como `--abi` son opcionales.**
-
-### De un Subgrafo de Ejemplo
-
-El segundo modo que admite `graph init` es la creación de un nuevo proyecto a partir de un subgrafo de ejemplo. El siguiente comando lo hace:
-
-```
-graph init --from-example --product hosted-service <GITHUB_USER>/<SUBGRAPH_NAME> [<DIRECTORY>]
-```
-
-El subgrafo de ejemplo se basa en el contrato Gravity de Dani Grant que administra los avatares de los usuarios y emite eventos `NewGravatar` o `UpdatedGravatar` cada vez que se crean o actualizan avatares. El subgrafo maneja estos eventos escribiendo entidades `Gravatar` en el almacén de Graph Node y asegurándose de que se actualicen de acuerdo con los eventos.
Continúa con el [manifiesto del subgrafo](/developing/creating-a-subgraph#the-subgraph-manifest) para comprender mejor a qué eventos de sus contratos inteligentes debe prestar atención, mappings y más. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -Puedes encontrar la lista de redes admitidas [aquí](/developing/supported-networks). diff --git a/website/pages/es/deploying/subgraph-studio.mdx b/website/pages/es/deploying/subgraph-studio.mdx deleted file mode 100644 index 27f193d14be3..000000000000 --- a/website/pages/es/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Bienvenido a tu nuevo punto de partida 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Crear un subgrafo a través de la UI de Studio -- Deployar un subgrafo usando el CLI -- Publicar un subgrafo con la UI de Studio -- Realizar una prueba en el Playground -- Integrarlo en el staging usando la URL de consulta -- Crear y gestionar sus claves API para subgrafos específicos - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. 
- -Consultar subgrafos genera tarifas de consulta, que se utilizan para recompensar a los [Indexadores](/network/indexing) en The Graph Network. Si eres un desarrollador de dapps o un desarrollador de subgrafos, Studio te permitirá crear mejores subgrafos para potenciar tus consultas o las de tu comunidad. El Studio se compone de 5 partes principales: - -- Los controles de tu cuenta de usuario -- Una lista de los subgrafos que has creado -- Una sección para gestionar, ver los detalles y visualizar el estado de un subgrafo específico -- Una sección para gestionar las claves de la API que necesitarás para consultar un subgrafo -- Una sección para gestionar tu facturación - -## Cómo Crear tu Cuenta - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Una vez que te registres, verás tu clave única de deploy en la página de inicio de tu cuenta. Esto te permitirá publicar tus subgrafos o gestionar tus claves API + facturación. Tendrás una clave de deploy única que se puede volver a generar si crees que ha sido comprometida. - -## How to Create a Subgraph in Subgraph Studio - - - -## Compatibilidad de los Subgrafos con The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Indexa una [red soportada](/developing/supported-networks) -- No debe utilizar ninguna de las siguientes funciones: - - ipfs.cat & ipfs.map - - Errores no fatales - - Grafting - -Se añadirán más funciones & redes a The Graph Network de forma gradual. - -### Ciclo de vida de un Subgrafo - -![Ciclo de vida de un Subgrafo](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. 
This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Publicar tu Subgrafo en Subgraph Studio - -Si quieres probar tu subgrafo antes de publicarlo en la red, puedes hacerlo en el Subgraph **Playground** o mirar tus registros. Los registros de subgrafo te dirán **dónde** falla tu subgrafo en el caso de que lo haga. - -## Publica tu Subgrafo en Subgraph Studio - -¡Has llegado hasta aquí, felicidades! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -También puedes ver el resumen en video a continuación: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Los indexadores deben presentar registros obligatorios de prueba de indexación a partir de un hash de bloque específico. Debido a que la publicación de un subgrafo es una acción realizada on-chain, recuerda que la transacción puede tardar algunos minutos en procesarse. Cualquier dirección que uses para publicar el contrato será la única que pueda publicar versiones futuras. ¡Elige sabiamente! - -Los subgrafos con señal de curación se muestran a los Indexadores para que puedan ser indexados en la red descentralizada. 
Puedes publicar los subgrafos y la señal en una sola transacción, lo que te permite acuñar la primera señal de curación en el subgrafo y ahorrar en costes de gas. Al añadir tu señal a la señal proporcionada posteriormente por los Curadores, tu subgrafo también tendrá una mayor probabilidad de servir finalmente a las consultas. - -**Ahora que has publicado tu subgrafo, veamos cómo lo vas a gestionar de forma regular.** Ten en cuenta que no puedes publicar tu subgrafo en la red si ha fallado la sincronización. Esto se debe normalmente a que el subgrafo tiene errores - ¡los registros te dirán dónde están esos problemas! - -## Versionando tu Subgrafo con el CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Hay que tener en cuenta que la publicación de una nueva versión de un subgrafo en la red tiene un coste. Además de las tarifas de transacción, los desarrolladores también deben financiar una parte de la tarifa de curación de la señal de auto-migración. 
No puedes publicar una nueva versión de tu subgrafo si los curadores no han señalado sobre él. Para más información sobre los riesgos de la curación, lee más [aquí](/network/curating). - -### Archivado Automático de Versiones de Subgrafos - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Desarchivar](/img/Unarchive.png) diff --git a/website/pages/es/developing/creating-a-subgraph.mdx b/website/pages/es/developing/creating-a-subgraph.mdx deleted file mode 100644 index 7483a5493405..000000000000 --- a/website/pages/es/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creación de un subgrafo ---- - -Un subgrafo extrae datos de una blockchain, los procesa y los almacena para que puedan consultarse fácilmente mediante GraphQL. - -![Definir un Subgrafo](/img/defining-a-subgraph.png) - -La definición del subgrafo consta de unos cuantos archivos: - -- `subgraph.yaml`: un archivo YAML que contiene el manifiesto del subgrafo - -- `schema.graphql`: un esquema GraphQL que define qué datos se almacenan para su subgrafo, y cómo consultarlos a través de GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) codigo que traduce de los datos del evento a las entidades definidas en su esquema (por ejemplo `mapping.ts` en este tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). 
It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Instalar The Graph CLI - -The Graph CLI está escrito en JavaScript, y tendrás que instalar `yarn` o `npm` para utilizarlo; se asume que tienes yarn en lo que sigue. - -Una vez que tengas `yarn`, instala The Graph CLI ejecutando - -**Instalar con yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalar con npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## Desde un Contrato Existente - -El siguiente comando crea un subgrafo que indexa todos los eventos de un contrato existente. Intenta obtener la ABI del contrato desde Etherscan y vuelve a solicitar una ruta de archivo local. Si falta alguno de los argumentos opcionales, te lleva a través de un formulario interactivo. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -El `` es el ID de tu subgrafo en Subgraph Studio, y se puede encontrar en la página de detalles de tu subgrafo. - -## Desde un Subgrafo de Ejemplo - -El segundo modo que admite `graph init` es la creación de un nuevo proyecto a partir de un subgrafo de ejemplo. 
El siguiente comando lo hace:
-
-```sh
-graph init --studio <SUBGRAPH_SLUG>
-```
-
-The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdatedGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example.
-
-## Añadir nuevas fuentes de datos a un subgrafo existente
-
-Desde `v0.31.0`, `graph-cli` permite añadir nuevos dataSources a un subgrafo existente mediante el comando `graph add`.
-
-```sh
-graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -El comando `add` obtendrá el ABI de Etherscan (a menos que se especifique una ruta ABI con la opción `--abi`), y creará un nuevo `dataSource` de la misma manera que el comando `graph init` crea un `dataSource` `--from-contract`, actualizando el esquema y los mappings de manera acorde. - -La opción `--merge-entities` identifica cómo el desarrollador desea manejar los conflictos de nombres de `entity` y `event`: - -- Si es `true`: el nuevo `dataSource` debe utilizar los `eventHandlers`& `entities` existentes. -- Si es `false`: se creará una nueva entidad & event handler con `${dataSourceName}{EventName}`. - -La `address` del contrato se escribirá en el archivo `networks.json` para la red correspondiente. - -> **Nota**: Cuando se utiliza el cli interactivo, después de ejecutar correctamente `graph init`, se te pedirá que añadas un nuevo `dataSource`. - -## El Manifiesto de Subgrafo - -El manifiesto del subgrafo `subgraph.yaml` define los contratos inteligentes que indexa tu subgrafo, a qué eventos de estos contratos prestar atención, y cómo mapear los datos de los eventos a las entidades que Graph Node almacena y permite consultar. La especificación completa de los manifiestos de subgrafos puede encontrarse en [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Para este subgrafo de ejemplo, `subgraph.yaml` es: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Las entradas importantes a actualizar para el manifiesto son: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: una lista de todos los nombres de las [feature](#experimental-features) usadas. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: la dirección del contrato inteligente del que procede el subgrafo, y la ABI del contrato inteligente a utilizar. La dirección es opcional; omitirla permite indexar eventos coincidentes de todos los contratos. - -- `dataSources.source.startBlock`: el número opcional del bloque desde el que la fuente de datos comienza a indexar. En la mayoría de los casos, sugerimos utilizar el bloque en el que se creó el contrato. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: las entidades que la fuente de datos escribe en el almacén. El esquema de cada entidad se define en el archivo schema.graphql. - -- `dataSources.mapping.abis`: uno o más archivos ABI con nombre para el contrato fuente, así como cualquier otro contrato inteligente con el que interactúes desde los mappings. - -- `dataSources.mapping.eventHandlers`: enumera los eventos de contratos inteligentes a los que reacciona este subgrafo y los handlers en el mapping -./src/mapping.ts en el ejemplo- que transforman estos eventos en entidades en el almacén. - -- `dataSources.mapping.callHandlers`: enumera las funciones de contrato inteligente a las que reacciona este subgrafo y los handlers en el mapping que transforman las entradas y salidas a las llamadas de función en entidades en el almacén. 
-
-- `dataSources.mapping.blockHandlers`: enumera los bloques a los que reacciona este subgrafo y los handlers en el mapping que se ejecutan cuando se agrega un bloque a la cadena. Sin un filtro, el handler de bloque se ejecutará cada bloque. Se puede proporcionar un filtro de llamada opcional añadiendo un campo `filter` con `kind: call` al handler. Este sólo ejecutará el handler si el bloque contiene al menos una llamada al contrato de la fuente de datos.
-
-Un único subgrafo puede indexar datos de múltiples contratos inteligentes. Añade una entrada por cada contrato del que haya que indexar datos a la array `dataSources`.
-
-### Order of Triggering Handlers
-
-Las triggers de una fuente de datos dentro de un bloque se ordenan mediante el siguiente proceso:
-
-1. Las triggers de eventos y calls se ordenan primero por el índice de la transacción dentro del bloque.
-2. Los triggers de eventos y calls dentro de la misma transacción se ordenan siguiendo una convención: primero los triggers de eventos y luego los de calls, respetando cada tipo el orden en que se definen en el manifiesto.
-3. Las triggers de bloques se ejecutan después de las triggers de eventos y calls, en el orden en que están definidos en el manifiesto.
-
-Estas normas de orden están sujetas a cambios.
-
-> **Note:** When new [dynamic data sources](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered.
-
-### Indexed Argument Filters / Topic Filters
-
-> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`
-
-Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments.
- -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. 
Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. 
-- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. 
Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. 
- -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Notas del lanzamiento | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Obtención de ABIs - -Los archivos ABI deben coincidir con tu(s) contrato(s). Hay varias formas de obtener archivos ABI: - -- Si estás construyendo tu propio proyecto, es probable que tengas acceso a tus ABIs más actuales. -- Si estás construyendo un subgrafo para un proyecto público, puedes descargar ese proyecto en tu computadora y obtener la ABI utilizando [`truffle compile`](https://truffleframework.com/docs/truffle/overview) o usando solc para compilar. -- También puedes encontrar la ABI en [Etherscan](https://etherscan.io/), pero no siempre es fiable, ya que la ABI que se sube allí puede estar desactualizada. Asegúrate de que tienes la ABI correcta, de lo contrario la ejecución de tu subgrafo fallará. - -## El Esquema GraphQL - -El esquema para tu subgrafo está en el archivo `schema.graphql`. 
Los esquemas GraphQL se definen utilizando el lenguaje de definición de interfaces GraphQL. Si nunca has escrito un esquema GraphQL, te recomendamos que le eches un vistazo a este manual sobre el sistema de tipos GraphQL. La documentación de referencia para los esquemas GraphQL se puede encontrar en la sección [GraphQL API](/querying/graphql-api). - -## Definir Entidades - -Antes de definir las entidades, es importante dar un paso atrás y pensar en cómo están estructurados y vinculados los datos. Todas las consultas se harán contra el modelo de datos definido en el esquema del subgrafo y las entidades indexadas por el subgrafo. Debido a esto, es bueno definir el esquema del subgrafo de una manera que coincida con las necesidades de tu dapp. Puede ser útil imaginar las entidades como "objetos que contienen datos", más que como eventos o funciones. - -Con The Graph, simplemente defines los tipos de entidad en `schema.graphql` y Graph Node generará campos de nivel superior para consultar instancias individuales y colecciones de ese tipo de entidad. Cada tipo que deba ser una entidad debe ser anotado con una directiva `@entity`. Por defecto, las entidades son mutables, lo que significa que los mappings pueden cargar entidades existentes, modificarlas y almacenar una nueva versión de esa entidad. La mutabilidad tiene un precio, y para los tipos de entidades que se sabe que nunca se modificarán, por ejemplo, porque simplemente contienen datos extraídos textualmente de la cadena, se recomienda marcarlas como inmutables con `@entity(immutable: true)`. Los mappings pueden realizar cambios en las entidades inmutables siempre que esos cambios se produzcan en el mismo bloque en el que se creó la entidad. Las entidades inmutables son mucho más rápidas de escribir y de consultar, por lo que deberían utilizarse siempre que sea posible. 
- -### Un buen ejemplo - -La entidad `Gravatar` que aparece a continuación está estructurada en torno a un objeto Gravatar y es un buen ejemplo de cómo podría definirse una entidad. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Un mal ejemplo - -Las entidades de ejemplo `GravatarAccepted` y `GravatarDeclined` que aparecen a continuación se basan en eventos. No se recomienda asignar eventos o calls a funciones a entidades 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Campos opcionales y obligatorios - -Los campos de la entidad pueden definirse como obligatorios u opcionales. Los campos obligatorios se indican con el `!` en el esquema. Si un campo obligatorio no está establecido en el mapping, recibirá este error al consultar el campo: - -``` -Null value resolved for non-null field 'name' -``` - -Cada entidad debe tener un campo `id`, que debe ser de tipo `Bytes!` o `String!`. Por lo general, se recomienda utilizar `Bytes!`, a menos que el `id` contenga texto legible por humanos, ya que las entidades con ids de `Bytes!` serán más rápidas de escribir y consultar que las que tienen un `id` de `String!`. El campo `id` sirve como clave primaria y debe ser único entre todas las entidades del mismo tipo. Por razones históricas, el tipo `ID!` también se acepta y es un sinónimo de `String!`. - -Para algunos tipos de entidad, el `id` se construye a partir de los id de otras dos entidades; esto es posible utilizando `concat`, por ejemplo, `let id = left.id.concat(right.id)` para formar el id a partir de los id de `left` y `right`. 
Del mismo modo, para construir un id a partir del id de una entidad existente y un contador, se puede utilizar `count` y `let id = left.id.concatI32(count)`. Se garantiza que la concatenación produzca id's únicos siempre que la longitud de `left` sea la misma para todas esas entidades, por ejemplo, porque `left.id` es una `Address`. - -### Tipos de Scalars incorporados - -#### GraphQL admite Scalars - -Admitimos los siguientes escalares en nuestra API GraphQL: - -| Tipo | Descripción | -| --- | --- | -| `Bytes` | Byte array, representado como un string hexadecimal. Comúnmente utilizado para los hashes y direcciones de Ethereum. | -| `String` | Escalar para valores `string`. Los caracteres nulos no son compatibles y se eliminan automáticamente. | -| `Boolean` | Escalar para valores `boolean`. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Números enteros grandes. Se utiliza para los tipos `uint32`, `int64`, `uint64`, ..., `uint256` de Ethereum. Nota: Todo por debajo de `uint32`, como `int32`, `uint24` o `int8` se representa como `i32`. | -| `BigDecimal` | `BigDecimal` Decimales de alta precisión representados como un signo y un exponente. El rango de exponentes va de -6143 a +6144. Redondeado a 34 dígitos significativos. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -También puedes crear enums dentro de un esquema. Los enums tienen la siguiente sintaxis: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Una vez definido el enum en el esquema, puedes utilizar la representación del string del valor del enum para establecer un campo enum en una entidad. 
Por ejemplo, puedes establecer el `tokenStatus` a `SecondOwner` definiendo primero tu entidad y posteriormente estableciendo el campo con `entity.tokenStatus = "SecondOwner"`. El siguiente ejemplo muestra el aspecto de la entidad Token con un campo enum: - -Puedes encontrar más detalles sobre la escritura de enums en la [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Relaciones entre Entidades - -Una entidad puede tener una relación con otra u otras entidades de su esquema. Estas relaciones pueden ser recorridas en sus consultas. Las relaciones en The Graph son unidireccionales. Es posible simular relaciones bidireccionales definiendo una relación unidireccional en cada "extremo" de la relación. - -Las relaciones se definen en las entidades como cualquier otro campo, salvo que el tipo especificado es el de otra entidad. - -#### Relaciones Uno a Uno - -Define un tipo de entidad `Transaction` con una relación opcional de uno a uno con un tipo de entidad `TransactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Relaciones one-to-many - -Define un tipo de entidad `TokenBalance` con una relación requerida de uno a varios con un tipo de entidad Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Búsquedas Inversas - -Se pueden definir búsquedas inversas en una entidad a través del campo `@derivedFrom`. Esto crea un campo virtual en la entidad que puede ser consultado pero que no puede ser establecido manualmente a través de la API de mapping. Más bien, se deriva de la relación definida en la otra entidad. 
Para este tipo de relaciones, rara vez tiene sentido almacenar ambos lados de la relación, y tanto la indexación como el rendimiento de la consulta serán mejores cuando sólo se almacene un lado y el otro se derive. - -En el caso de las relaciones one-to-many, la relación debe almacenarse siempre en el lado "one", y el lado "many" debe derivarse siempre. Almacenar la relación de esta manera, en lugar de almacenar una array de entidades en el lado "many", resultará en un rendimiento dramáticamente mejor tanto para la indexación como para la consulta del subgrafo. En general, debe evitarse, en la medida de lo posible, el almacenamiento de arrays de entidades. - -#### Ejemplo - -Podemos hacer que los balances de un token sean accesibles desde el token derivando un campo `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Relaciones de many-to-many - -Para las relaciones de many-to-many, como los usuarios pueden pertenecer a cualquier número de organizaciones, la forma más directa, pero generalmente no la más eficaz, de modelar la relación es en un array en cada una de las dos entidades implicadas. Si la relación es simétrica, sólo es necesario almacenar un lado de la relación y el otro puede derivarse. - -#### Ejemplo - -Define una búsqueda inversa desde un tipo de entidad `User` a un tipo de entidad `Organization`. En el ejemplo siguiente, esto se consigue buscando el atributo `members` desde la entidad `Organization`. En las consultas, el campo `organizations` en `User` se resolverá buscando todas las entidades de `Organization` que incluyan el ID del usuario. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! 
@derivedFrom(field: "members") -} -``` - -Una forma más eficaz de almacenar esta relación es a través de una tabla de mapping que tiene una entrada para cada par `User` / `Organization` con un esquema como - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -Este enfoque requiere que las consultas desciendan a un nivel adicional para recuperar, por ejemplo, las organizaciones para los usuarios: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Esta forma más elaborada de almacenar las relaciones many-to-many se traducirá en menos datos almacenados para el subgrafo y, por tanto, en un subgrafo que suele ser mucho más rápido de indexar y consultar. - -#### Agregar comentarios al esquema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Definición de campos de búsqueda de texto completo - -Las consultas de búsqueda de texto completo filtran y clasifican las entidades basándose en una entrada de búsqueda de texto. Las consultas de texto completo pueden devolver coincidencias de palabras similares procesando el texto de la consulta en stems antes de compararlo con los datos del texto indexado. 
- -La definición de una consulta de texto completo incluye el nombre de la consulta, el diccionario lingüístico utilizado para procesar los campos de texto, el algoritmo de clasificación utilizado para ordenar los resultados y los campos incluidos en la búsqueda. Cada consulta de texto completo puede abarcar varios campos, pero todos los campos incluidos deben ser de un solo tipo de entidad. - -Para agregar una consulta de texto completo, incluye un tipo `_Schema_` con una directiva de texto completo en el esquema GraphQL. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -El ejemplo campo `bandSearch` se puede utilizar en las consultas para filtrar las entidades `Band` con base en los documentos de texto en los campos `name`, `description`, y `bio`. Ve a [GraphQL API - Queries](/querying/graphql-api#queries) para ver una descripción de la API de búsqueda de texto completo y más ejemplos de uso. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** Desde `specVersion` `0.0.4` y en adelante, `fullTextSearch` se debe declarar bajo la sección `features` en el manifiesto del subgrafo. - -### Idiomas admitidos - -La elección de un idioma diferente tendrá un efecto definitivo, aunque a veces sutil, en la API de búsqueda de texto completo. Los campos cubiertos por un campo de consulta de texto completo se examinan en el contexto de la lengua elegida, por lo que los lexemas producidos por las consultas de análisis y búsqueda varían de un idioma a otro. 
Por ejemplo: al utilizar el diccionario turco compatible, "token" se convierte en "toke", mientras que el diccionario inglés lo convierte en "token". - -Diccionarios de idiomas admitidos: - -| Código | Diccionario | -| ------ | ----------- | -| simple | General | -| da | Danés | -| nl | Holandés | -| en | Inglés | -| fi | Finlandés | -| fr | Francés | -| de | Alemán | -| hu | Húngaro | -| it | Italiano | -| no | Noruego | -| pt | Portugués | -| ro | Rumano | -| ru | Ruso | -| es | Español | -| sv | Sueco | -| tr | Turco | - -### Algoritmos de Clasificación - -Algoritmos admitidos para ordenar los resultados: - -| Algoritmos | Descripción | -| --- | --- | -| rango | Usa la calidad de coincidencia (0-1) de la consulta de texto completo para ordenar los resultados. | -| rango de proximidad | Similar al rango, pero también incluye la proximidad de los matches. | - -## Escribir Mappings - -Los mapeos toman datos de una fuente particular y los transforman en entidades que están definidas dentro de su esquema. Los mapeos se escriben en un subconjunto de [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) llamado [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) que puede compilarse a WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript es más estricto que TypeScript normal, pero proporciona una sintaxis familiar. - -Para cada handler de eventos que se define en `subgraph.yaml` bajo `mapping.eventHandlers`, crea una función exportada del mismo nombre. Cada handler debe aceptar un único parámetro llamado `event` con un tipo correspondiente al nombre del evento que se está manejando. 
- -En el subgrafo de ejemplo, `src/mapping.ts` contiene handlers para los eventos `NewGravatar` y `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -El primer handler toma un evento `NewGravatar` y crea una nueva entidad `Gravatar` con `new Gravatar(event.params.id.toHex())`, poblando los campos de la entidad usando los parámetros correspondientes del evento. Esta instancia de entidad está representada por la variable `gravatar`, con un valor de id de `event.params.id.toHex()`. - -El segundo handler intenta cargar el `Gravatar` existente desde el almacén de The Graph Node. Si aún no existe, se crea bajo demanda. A continuación, la entidad se actualiza para que coincida con los nuevos parámetros del evento, antes de volver a guardarla en el almacén mediante `gravatar.save()`. - -### ID recomendados para la creación de nuevas entidades - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. 
- -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## Generación de código - -Para que trabajar con contratos inteligentes, eventos y entidades sea fácil y seguro desde el punto de vista de los tipos, Graph CLI puede generar tipos AssemblyScript a partir del esquema GraphQL del subgrafo y de las ABIs de los contratos incluidas en las fuentes de datos. 
- -Esto se hace con - -```sh -graph codegen [--output-dir ] [] -``` - -pero en la mayoría de los casos, los subgrafos ya están preconfigurados a través de `package.json` para permitirte simplemente ejecutar uno de los siguientes para lograr lo mismo: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Esto generará una clase AssemblyScript para cada contrato inteligente en los archivos ABI mencionados en `subgraph.yaml`, permitiéndote vincular estos contratos a direcciones específicas en los mappings y llamar a métodos de contrato de sólo lectura contra el bloque que se está procesando. También generará una clase para cada evento del contrato para facilitar el acceso a los parámetros del evento, así como el bloque y la transacción que originó el evento. Todos estos tipos se escriben en `//.ts`. En el subgrafo de ejemplo, esto sería `generated/Gravity/Gravity.ts`, permitiendo mappings con los que importar estos tipos. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Además, se genera una clase para cada tipo de entidad en el esquema GraphQL del subgrafo. Estas clases proporcionan una carga de entidades segura, acceso de lectura y escritura a los campos de la entidad, así como un método `save()` para escribir entidades en el almacén. Todas las clases de entidades se escriben en `/schema.ts`, permitiendo mappings con los que importarlos - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Nota:** La generación de código debe realizarse de nuevo después de cada cambio en el esquema GraphQL o en las ABIs incluidas en el manifiesto. También debe realizarse al menos una vez antes de construir o deployar el subgrafo. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Plantillas para fuentes de datos - -Un patrón común en los contratos inteligentes de Ethereum es el uso de contratos de registro o fábrica, donde un contrato crea, gestiona o hace referencia a un número arbitrario de otros contratos que tienen cada uno su propio estado y eventos. - -Las direcciones de estos subcontratos pueden o no ser conocidas de antemano y muchos de estos contratos pueden ser creados y/o añadidos con el tiempo. Es por eso que, en tales casos, es imposible definir una sola fuente de datos o un número fijo de fuentes de datos y se necesita un enfoque más dinámico: _plantillas de fuente de datos_. - -### Fuente de Datos para el Contrato Principal - -En primer lugar, define una fuente de datos regular para el contrato principal. El siguiente fragmento muestra un ejemplo simplificado de fuente de datos para el contrato generador del exchange [Uniswap](https://uniswap.org). Nota el handler `NewExchange(address,address)` del evento. Esto se emite cuando el contrato de fábrica crea un nuevo contrato de exchange en la cadena. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Plantillas de fuentes de datos para contratos creados dinámicamente - -A continuación, añade _plantillas de origen de datos_ al manifiesto. Son idénticas a las fuentes de datos normales, salvo que carecen de una dirección de contrato predefinida en `source`. 
Normalmente, defines un modelo para cada tipo de subcontrato gestionado o referenciado por el contrato principal. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instanciación de una plantilla de fuente de datos - -En el último paso, actualiza el mapping del contrato principal para crear una instancia de fuente de datos dinámica a partir de una de las plantillas. En este ejemplo, cambiarías el mapping del contrato principal para importar la plantilla `Exchange` y llamaría al método `Exchange.create(address)` en él para empezar a indexar el nuevo contrato de exchange. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Nota:** Un nuevo origen de datos sólo procesará las llamadas y los eventos del bloque en el que fue creado y todos los bloques siguientes, pero no procesará los datos históricos, es decir, los datos que están contenidos en bloques anteriores. 
-> -> Si los bloques anteriores contienen datos relevantes para la nueva fuente de datos, lo mejor es indexar esos datos leyendo el estado actual del contrato y creando entidades que representen ese estado en el momento de crear la nueva fuente de datos. - -### Contexto de la fuente de datos - -Los contextos de fuentes de datos permiten pasar una configuración extra al instanciar una plantilla. En nuestro ejemplo, digamos que los exchanges se asocian a un par de trading concreto, que se incluye en el evento `NewExchange`. Esa información se puede pasar a la fuente de datos instanciada, así: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Dentro de un mapping de la plantilla `Exchange`, se puede acceder al contexto: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Hay setters y getters como `setString` and `getString` para todos los tipos de valores. - -## Bloques iniciales - -El `startBlock` es un ajuste opcional que permite definir a partir de qué bloque de la cadena comenzará a indexar la fuente de datos. Establecer el bloque inicial permite a la fuente de datos omitir potencialmente millones de bloques que son irrelevantes. Normalmente, un desarrollador de subgrafos establecerá `startBlock` al bloque en el que se creó el contrato inteligente de la fuente de datos. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Nota:** El bloque de creación del contrato se puede buscar rápidamente en Etherscan: -> -> 1. Busca el contrato introduciendo su dirección en la barra de búsqueda. -> 2. Haz clic en el hash de la transacción de creación en la sección `Contract Creator`. -> 3. Carga la página de detalles de la transacción, donde encontrarás el bloque inicial de ese contrato. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
- -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. 
- -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -Aunque los eventos proporcionan una forma eficaz de recoger los cambios relevantes en el estado de un contrato, muchos contratos evitan generar registros para optimizar los costos de gas. En estos casos, un subgrafo puede suscribirse a las calls realizadas al contrato de la fuente de datos. Esto se consigue definiendo los call handlers que hacen referencia a la firma de la función y al handler de mapping que procesará las calls a esta función. Para procesar estas calls, el handler de mapping recibirá un `ethereum.Call` como argumento con las entradas y salidas tipificadas de la call. 
Las calls realizadas en cualquier profundidad de la cadena de calls de una transacción activarán el mapping, permitiendo capturar la actividad con el contrato de origen de datos a través de los contratos proxy. - -Los call handlers solo se activarán en uno de estos dos casos: cuando la función especificada sea llamada por una cuenta distinta del propio contrato o cuando esté marcada como externa en Solidity y sea llamada como parte de otra función en el mismo contrato. - -> **Nota**: Los call handlers dependen actualmente de la API de rastreo Parity. Ciertas redes, como BNB chain y Arbitrum, no soportan esta API. Si un subgrafo que indexa una de estas redes contiene uno o más call handlers, no comenzará la sincronización. En su lugar, los developers de subgrafos deberían utilizar handlers de eventos. Estos son mucho más eficaces que los handlers de llamadas, y están soportados en todas las redes evm. - -### Definición de un Call Handler - -Para definir un call handler en su manifiesto simplemente añade una array `callHandlers` bajo la fuente de datos a la que deseas suscribirte. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -La `function` es la firma de la función normalizada por la que se filtran las llamadas. La propiedad `handler` es el nombre de la función de tu mapping que quieres ejecutar cuando se llame a la función de destino en el contrato de origen de datos. - -### Función mapeo - -Cada call handler toma un solo parámetro que tiene un tipo correspondiente al nombre de la función llamada. 
En el subgrafo de ejemplo anterior, el mapping contiene un handler para cuando se llama a la función `createGravatar` y recibe un parámetro `CreateGravatarCall` como argumento: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -La función `handleCreateGravatar` toma una nueva `CreateGravatarCall` que es una subclase de `ethereum.Call`, proporcionada por `@graphprotocol/graph-ts`, que incluye las entradas y salidas tipificadas del call. El tipo `CreateGravatarCall` se genera por ti cuando ejecutas `graph codegen`. - -## Handlers de bloques - -Además de suscribirse a eventos del contrato o calls de funciones, un subgrafo puede querer actualizar sus datos a medida que se añaden nuevos bloques a la cadena. Para ello, un subgrafo puede ejecutar una función después de cada bloque o después de los bloques que coincidan con un filtro predefinido. - -### Filtros admitidos - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_El handler definido será llamado una vez por cada bloque que contenga una llamada al contrato (fuente de datos) bajo el cual está definido el handler._ - -> **Nota**: El filtro `call` depende actualmente de la API de rastreo Parity. Ciertas redes, como la cadena BNB y Arbitrum, no soportan esta API. Si un subgrafo que indexa una de estas redes contiene uno o más handlers de bloque con un filtro de `call`, no comenzará la sincronización. - -La ausencia de un filtro para un handler de bloque asegurará que el handler sea llamado en cada bloque. Una fuente de datos solo puede contener un handler de bloque para cada tipo de filtro. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. - -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Función mapeo - -La función de mapeo recibirá un `ethereum.Block` como único argumento. Al igual que las funciones de mapping para eventos, esta función puede acceder a entidades de subgrafos existentes en el almacén, llamar a contratos inteligentes y crear o actualizar entidades. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Eventos anónimos - -Si necesitas procesar eventos anónimos en Solidity, puedes hacerlo proporcionando el tema 0 del evento, como en el ejemplo: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Un evento solo se activará cuando la firma y el tema 0 coincidan. Por defecto, `topic0` es igual al hash de la firma del evento. - -## Recepción de transacciones en Event Handlers - -A partir de `specVersion` `0.0.5` y `apiVersion` `0.0.7`, los handlers de eventos pueden tener acceso al recibo de la transacción que los emitió. - -Para ello, los handlers de eventos deben declararse en el manifiesto del subgrafo con la nueva clave `receipt: true`, que es opcional y por defecto es false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Dentro de la función handler, se puede acceder al recibo en el campo `Event.receipt`. Cuando la clave `receipt` se establece en `false` o se omite en el manifiesto, se devolverá un valor `null` en su lugar. 
- -## Características experimentales - -A partir de `specVersion` `0.0.4`, los features de los subgrafos deben declararse explícitamente en la sección de `features` del nivel superior del archivo de manifiesto, utilizando su nombre en `camelCase`, como se indica en la tabla siguiente: - -| Característica | Nombre | -| ---------------------------------------------------- | ---------------- | -| [Errores no fatales](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -Por ejemplo, si un subgrafo utiliza las características **Full-Text Search** y **Non-fatal Errors**, el campo `features` del manifiesto debería ser: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Ten en cuenta que el uso de una característica sin declararla incurrirá en un **error de validación** durante el deploy del subgrafo, pero no se producirá ningún error si se declara una característica pero no se utiliza. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. 
Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Errores no fatales - -Los errores de indexación en subgrafos ya sincronizados provocarán, por defecto, que el subgrafo falle y deje de sincronizarse. Los subgrafos pueden ser configurados de manera alternativa para continuar la sincronización en presencia de errores, ignorando los cambios realizados por el handler que provocó el error. 
Esto da a los autores de los subgrafos tiempo para corregir sus subgrafos mientras las consultas continúan siendo servidas contra el último bloque, aunque los resultados serán posiblemente inconsistentes debido al bug que provocó el error. Nótese que algunos errores siguen siendo siempre fatales, para que el error no sea fatal debe saberse que es deterministico. - -> **Nota:** The Graph Network todavía no admite errores no fatales, y los developers no deben deployar subgrafos que utilicen esa funcionalidad en la red a través de Studio. - -Para activar los errores no fatales es necesario establecer el siguiente indicador en el manifiesto del subgrafo: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -La consulta también debe optar por consultar los datos con posibles incoherencias a través del argumento `subgraphError`. También se recomienda consultar `_meta` para comprobar si el subgrafo ha saltado por encima de los errores, como en el ejemplo: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Si el subgrafo encuentra un error esa consulta devolverá tanto los datos como un error de graphql con el mensaje `"indexing_error"`, como en este ejemplo de respuesta: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting sobre subgrafos existentes - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -Cuando un subgrafo es deployado por primera vez, comienza a indexar eventos en el bloque génesis de la cadena correspondiente (o en el `startBlock` definido con cada fuente de datos) En algunas circunstancias, es beneficioso reutilizar los datos de un subgrafo existente y comenzar a indexar en un bloque mucho más tardío. Este modo de indexación se denomina _Grafting_. El Grafting es, por ejemplo, útil durante el desarrollo para superar rápidamente errores simples en los mappings, o para hacer funcionar temporalmente un subgrafo existente después de que haya fallado. - -Un subgrafo se graftea en un subgrafo base cuando el manifiesto de subgrafo `subgraph.yaml` contiene un bloque `graft` en el nivel superior: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Cuando se realiza el deploy de un subgrafo cuyo manifiesto contiene un bloque `graft`, Graph Node copiará los datos del subgrafo `base` hasta el `block` dado, inclusive, y luego continuará indexando el nuevo subgrafo a partir de ese bloque. El subgrafo base debe existir en la instancia de Graph Node de destino y debe haber indexado hasta al menos el bloque dado. Debido a esta restricción, el grafting sólo debería utilizarse durante el desarrollo o durante una emergencia para acelerar la producción de un subgrafo equivalente no grafted (injertado). - -Debido a que el grafting copia en lugar de indexar los datos base, es mucho más rápido llevar el subgrafo al bloque deseado que indexar desde cero, aunque la copia inicial de los datos aún puede llevar varias horas para subgrafos muy grandes. Mientras se inicializa el subgrafo grafted, Graph Node registrará información sobre los tipos de entidad que ya han sido copiados. - -El subgrafo grafteado puede utilizar un esquema GraphQL que no es idéntico al del subgrafo base, sino simplemente compatible con él. 
Tiene que ser un esquema de subgrafo válido por sí mismo, pero puede diferir del esquema del subgrafo base de las siguientes maneras: - -- Agrega o elimina tipos de entidades -- Elimina los atributos de los tipos de entidad -- Agrega atributos anulables a los tipos de entidad -- Convierte los atributos no anulables en atributos anulables -- Añade valores a los enums -- Agrega o elimina interfaces -- Cambia para qué tipos de entidades se implementa una interfaz - -> **[La gestión de características](#experimental-features):** `grafting` se declara en `features` en el manifiesto del subgrafo. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> Esto también establece las bases para la indexación determinista de datos off-chain, así como la posible introducción de datos arbitrarios procedentes de HTTP. - -### Descripción - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> Esto sustituye a la API `ipfs.cat` existente - -### Upgrade guide - -#### Actualización de `graph-ts` y `graph-cli` - -Las fuentes de datos de archivos requieren graph-ts >=0.29.0 y graph-cli >=0.33.1 - -#### Añadir un nuevo tipo de entidad que se actualizará cuando se encuentren archivos - -Las fuentes de datos de archivos no pueden acceder a entidades basadas en cadenas ni actualizarlas, pero deben actualizar entidades específicas de archivos. 
- -Esto puede significar dividir campos de entidades existentes en entidades separadas, vinculadas entre sí. - -Entidad combinada original: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Nueva, entidad dividida: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Si la relación es 1:1 entre la entidad padre y la entidad fuente de datos de archivo resultante, el patrón más sencillo es vincular la entidad padre a una entidad de archivo resultante utilizando el CID IPFS como búsqueda. Pónte en contacto con nosotros en Discord si tienes dificultades para modelar tus nuevas entidades basadas en archivos! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -Esta es la fuente de datos que se generará cuando se identifique un archivo de interés. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Actualmente se requieren `abis`, aunque no es posible llamar a los contratos desde las fuentes de datos de los archivos - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### Crear un nuevo handler para procesar archivos - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -Se puede acceder al CID del archivo como un string legible a través del `dataSource` de la siguiente manera: - -```typescript -const cid = dataSource.stringParam() -``` - -Ejemplo de handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Generar fuentes de datos de archivos cuando sea necesario - -Ahora puedes crear fuentes de datos de archivos durante la ejecución de handlers basados en cadenas: - -- Importar la plantilla desde los `templates` autogenerados -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
- -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Ejemplo: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -En este ejemplo se utiliza el CID como búsqueda entre la entidad `Token` principal y la entidad `TokenMetadata` resultante. 
- -> Anteriormente, este es el punto en el que un developer de subgrafos habría llamado a `ipfs.cat(CID)` para obtener el archivo - -¡Felicitaciones, estás utilizando fuentes de datos de archivos! - -#### Deploy de tus subgrafos - -Ya puedes `build` y `deploy` tu subgrafo en cualquier Graph Node >=v0.30.0-rc.0. - -#### Limitaciones - -Los handlers y entidades de fuentes de datos de archivos están aislados de otras entidades del subgrafo, asegurando que son deterministas cuando se ejecutan, y asegurando que no se contaminan las fuentes de datos basadas en cadenas. En concreto: - -- Las entidades creadas por File Data Sources son inmutables y no pueden actualizarse -- Los handlers de File Data Source no pueden acceder a entidades de otras fuentes de datos de archivos -- Los handlers basados en cadenas no pueden acceder a las entidades asociadas a File Data Sources - -> Aunque esta restricción no debería ser problemática para la mayoría de los casos de uso, puede introducir complejidad para algunos. Si tienes problemas para modelar tus datos basados en archivos en un subgrafo, ponte en contacto con nosotros a través de Discord! - -Además, no es posible crear fuentes de datos a partir de una File Data Source, ya sea una fuente de datos on-chain u otra File Data Source. Es posible que esta restricción se elimine en el futuro. - -#### Mejores Prácticas - -Si estás vinculando metadatos NFT a los tokens correspondientes, utiliza el hash IPFS de los metadatos para hacer referencia a una entidad Metadata desde la entidad Token. Guarda la entidad de metadatos utilizando el hash IPFS como ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. 
- -Si tienes entidades que se actualizan varias veces, crea entidades únicas basadas en archivos utilizando el hash IPFS & el ID de la entidad, y haz referencia a ellas utilizando un campo derivado en la entidad basada en cadena. - -> Estamos trabajando para mejorar la recomendación anterior, de modo que las consultas sólo devuelvan la versión "más reciente" - -#### Problemas conocidos - -File Data Sources requieren actualmente ABIs, aunque no se utilicen ABIs ([problema](https://github.com/graphprotocol/graph-cli/issues/961)). La solución es añadir cualquier ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Ejemplos - -[Migración de subgrafo Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Referencias - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/es/developing/creating-a-subgraph/_meta.js b/website/pages/es/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/es/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/es/developing/graph-ts/_meta.js b/website/pages/es/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/es/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/es/managing/deprecate-a-subgraph.mdx b/website/pages/es/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 
034db6a1c8ee..000000000000 --- a/website/pages/es/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/es/mips-faqs.mdx b/website/pages/es/mips-faqs.mdx deleted file mode 100644 index e0a60ea776d5..000000000000 --- a/website/pages/es/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Preguntas Frecuentes sobre MIPs ---- - -## Introducción - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. 
- -To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. - -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! 
Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. 
However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? - -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. 
More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. 
diff --git a/website/pages/es/network/_meta.js b/website/pages/es/network/_meta.js index 91d957d25ec8..49858537c885 100644 --- a/website/pages/es/network/_meta.js +++ b/website/pages/es/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: 'Descripción', } diff --git a/website/pages/es/querying/_meta.js b/website/pages/es/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/es/querying/_meta.js +++ b/website/pages/es/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/es/querying/graph-client/_meta.js b/website/pages/es/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/es/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/fr/_meta.js b/website/pages/fr/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/fr/_meta.js +++ b/website/pages/fr/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 39d85164f0f6..000000000000 --- a/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Déploiement d'un subgraph dans le service hébergé ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 
If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Stocker le jeton d'accès - -Après avoir créé un compte, accédez à votre [tableau de bord](https://thegraph.com/hosted-service/dashboard). Copiez le jeton d'accès affiché sur le tableau de bord et exécutez `graph auth --product hosted-service `. Le jeton d'accès sera ainsi stocké sur votre ordinateur. Vous ne devez effectuer cette opération qu'une seule fois, ou si vous régénérez le jeton d'accès. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Sélectionnez une image à utiliser comme image de prévisualisation et comme vignette pour le subgraph. - -**Nom du sous-graphe** - Avec le nom du compte sous lequel le sous-graphe est créé, ce champ définit également le nom de style `nom du compte/nom du sous-graphe` utilisé pour les déploiements et les points de terminaison GraphQL. _Ce champ ne peut pas être modifié ultérieurement._ - -**Compte** : le compte sous lequel le subgraph est créé. Il peut s'agir du compte d'un individu ou d'une organisation. _Les subgraphs ne pourront pas être déplacés ultérieurement entre les comptes._ - -**Sous-titre** : texte qui apparaîtra dans les cartes subgraphs. - -**Description** - Description du Subgraph, visible sur la page de détails du subgraph. 
- -**GitHub URL** - Lien vers le dépôt du subgraph sur GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -Vous déployez le subgraph en exécutant `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -L'état du subgraph passe à `Synced` une fois que le nœud the Graph a extrait toutes les données des blocs historiques. Le nœud de the Graph continuera à inspecter les blocs de votre subgraph au fur et à mesure que ces blocs seront exploités. - -## Redéployer un Subgraph - -Lorsque vous apportez des modifications à la définition de votre subgraph, par exemple pour corriger un problème dans les mappages d'entités, exécutez à nouveau la commande `yarn deploy` ci-dessus pour déployer la version mise à jour de votre subgraph. - -Si votre subgraph précédemment déployé est toujours en statut `Synchronisation`, il sera immédiatement remplacé par la version nouvellement déployée. 
Si le subgraph précédemment déployé est déjà entièrement synchronisé, Graph Node marquera la nouvelle version déployée comme `Version en attente`, la synchronisera en arrière-plan et ne remplacera la version actuellement déployée par la nouvelle qu'une fois la synchronisation de la nouvelle version terminée. Cela permet de s'assurer que vous disposez d'un subgraph avec lequel travailler pendant la synchronisation de la nouvelle version. - -## Déploiement du subgraph sur plusieurs réseaux - -Dans certains cas, vous souhaiterez déployer le même subgraph sur plusieurs réseaux sans dupliquer tout son code. Le principal défi qui en découle est que les adresses contractuelles sur ces réseaux sont différentes. - -### Utiliser graph-cli - -Le `graph build` (depuis `v0.29.0`) et le `graph deploy` (depuis `v0.32.0`) acceptent deux nouvelles options : - -```sh -Options: - - ... - --network Configuration du réseau à utiliser à partir du fichier de configuration des réseaux - --network-file Chemin du fichier de configuration des réseaux (par défaut : "./networks.json")" -``` - -Vous pouvez utiliser l'option `--network` pour spécifier une configuration de réseau à partir d'un fichier standard `json` (par défaut `networks.json`) afin de mettre à jour facilement votre subgraph pendant le développement. - -**Remarque :** La commande `init` générera désormais automatiquement un `networks.json` basé sur les informations fournies. Vous pourrez alors mettre à jour des réseaux existants ou ajouter des réseaux supplémentaires. 
- -Si vous n'avez pas de fichier `networks.json`, vous devrez en créer un manuellement avec la structure suivante : - -```json -{ - "network1": { // le nom du réseau - "dataSource1": { // le nom de la source de données - "address": "0xabc...", // l'adresse du contrat (facultatif) - "startBlock": 123456 // le bloc de départ (facultatif) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** Vous n'avez pas à spécifier les `modèles` (si vous en avez) dans le fichier de configuration, seulement les `dataSources`. S'il existe des `modèles` déclarés dans le fichier `subgraph.yaml`, leur réseau sera automatiquement mis à jour avec celui spécifié avec l'option `--network`. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Voici à quoi devrait ressembler votre fichier de configuration réseau : - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Nous pouvons maintenant exécuter l'une des commandes suivantes : - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -Vous êtes maintenant prêt à `yarn deploy`. - -**Remarque :** Comme mentionné précédemment, depuis `graph-cli 0.32.0` vous pouvez exécuter directement `yarn deploy` avec le `--network` option : - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Utilisation du modèle subgraph.yaml - -Une solution pour les anciennes versions de graph-cli qui permet de paramétrer des aspects tels que les adresses de contrat consiste à en générer des parties à l'aide d'un système de modèles comme [Mustache](https://mustache.github.io/) ou [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -et - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Parallèlement à cela, vous remplaceriez le nom et les adresses du réseau dans le manifeste par des espaces réservés variables `{{network}}` et `{{address}}` et renommez le manifeste par exemple. `subgraph.template.yaml` : - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Afin de générer un manifeste sur l'un ou l'autre des réseaux, vous pouvez ajouter deux commandes supplémentaires à `package.json` ainsi qu'une dépendance sur `mustache` : - -```json -{ - ... - "scripts": { - ... 
- "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Un exemple concret de ce type d'action peut être trouvé [ici](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** Cette approche peut également être appliquée à des situations plus complexes, lorsqu'il est nécessaire de substituer plus que des adresses contractuelles et des noms de réseau ou de générer des mappings ou des ABI à partir de modèles. - -## Vérification de l'état des subgraphs - -Si un subgraph se synchronise avec succès, c'est un bon signe qu'il continuera à bien fonctionner pour toujours. Cependant, de nouveaux déclencheurs sur le réseau peuvent amener votre subgraph à rencontrer une condition d'erreur non testée ou il peut commencer à prendre du retard en raison de problèmes de performances ou de problèmes avec les opérateurs de nœuds. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Cela vous donnera le `chainHeadBlock` que vous pouvez comparer avec le `latestBlock` de votre subgraph pour vérifier s'il est en retard. `synced` indique si le subgraph a déjà rattrapé la chaîne. `health` peut actuellement prendre les valeurs `healthy` si aucune erreur ne s'est produite, ou `failed` si une erreur a interrompu la progression du subgraph. Dans ce cas, vous pouvez consulter le champ `fatalError` pour obtenir des détails sur cette erreur. - -## Politique d'archivage des subgraphs des services hébergés - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Politique d'archivage des subgraphs de Subgraph Studio - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Chaque subgraph concerné par cette politique dispose d'une option de restauration de la version en question. diff --git a/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index b21b52a461fb..000000000000 --- a/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Déploiement d'un subgraph dans Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Faire l'installation de The Graph CLI (avec yarn ou npm) -- Créez votre subgraph dans Subgraph Studio -- Authentifier votre compte à partir de l'interface CLI -- Déploiement d'un subgraph dans Subgraph Studio - -## Installation de Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Installer avec yarn :** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installer avec npm :** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Créez votre subgraph dans Subgraph Studio - -Avant de déployer votre subgraph, vous devez créer un subgraph dans [Subgraph Studio](https://thegraph.com/studio/). Nous vous recommandons de lire notre [documentation sur le sujet](/deploying/subgraph-studio) pour en savoir plus. 
- -## Initialisation de votre subgraph - -Une fois que votre subgraph a été créé dans Subgraph Studio, vous pouvez initialiser le code du subgraph en utilisant cette commande : - -```bash -graph init --studio -``` - -La valeur `` se trouve sur la page de détails de votre subgraph dans Subgraph Studio : - -![Subgraph Studio - Identifiant](/img/doc-subgraph-slug.png) - -Après avoir exécuté `graph init`, il vous sera demandé de saisir l'adresse du contrat, le réseau et l'ABI que vous souhaitez interroger. Cette opération générera un nouveau dossier sur votre machine locale avec un code de base pour commencer à travailler sur votre subgraph. Vous pouvez ensuite finaliser votre subgraph pour vous assurer qu'il fonctionne comme prévu. - -## Authentification The Graph - -Avant de pouvoir déployer votre subgraph dans Subgraph Studio, vous devez vous connecter à votre compte dans le CLI. Pour ce faire, vous aurez besoin de votre clé de déploiement que vous pouvez trouver sur votre page "Mes subgraphs" ou sur la page de détails de votre subgraphs. - -Voici la commande que vous devez utiliser pour vous authentifier depuis le CLI : - -```bash -graph auth --studio -``` - -## Déploiement d'un subgraph dans Subgraph Studio - -Une fois que vous êtes prêt, vous pouvez déployer votre subgraph dans Subgraph Studio. Cette opération ne publiera pas votre subgraph sur le réseau décentralisé, elle le déploiera uniquement sur votre compte Studio, où vous pourrez le tester et mettre à jour les métadonnées. - -Voici la commande CLI que vous devez utiliser pour déployer votre subgraph. - -```bash -graph deploy --studio -``` - -Après avoir exécuté cette commande, le CLI vous demandera un label de version, vous pouvez le nommer comme vous le souhaitez, vous pouvez utiliser des labels tels que `0.1` et `0.2` ou utiliser également des lettres telles que `uniswap-v2-0.1`. 
Ces étiquettes seront visibles dans Graph Explorer et peuvent être utilisées par les curateurs pour décider s'ils veulent signaler cette version ou non, alors choisissez-les judicieusement. - -Une fois déployé, vous pouvez tester votre subgraph dans le Subgraph Studio en utilisant le playground, déployer une autre version si nécessaire, mettre à jour les métadonnées et, lorsque vous êtes prêt, publier votre subgraph sur le Graph Explorer. diff --git a/website/pages/fr/deploying/hosted-service.mdx b/website/pages/fr/deploying/hosted-service.mdx deleted file mode 100644 index 0d566466e0e2..000000000000 --- a/website/pages/fr/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Qu'est-ce que le Service Héberge ? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -Cette section vous guidera dans le déploiement d'un subgraph sur le [service hébergé](https://thegraph.com/hosted-service/). - -Si vous n'avez pas de compte sur le service hébergé, vous pouvez vous inscrire avec votre compte GitHub. Une fois authentifié, vous pouvez commencer à créer des subgraphs via l'interface utilisateur et les déployer depuis votre terminal. Le service hébergé supporte un certain nombre de réseaux, tels que Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, et plus encore. - -Pour une liste complète, voir [Réseaux pris en charge](/developing/supported-networks/#hosted-service). - -## Créer un subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. 
Create a subgraph by passing in `graph init --product hosted-service` - -### À partir d'un contrat existant - -Si vous disposez déjà d'un contrat intelligent déployé sur le réseau de votre choix, l'amorçage d'un nouveau subgraph à partir de ce contrat peut être un bon moyen de commencer à utiliser le service hébergé. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -Le `` dans ce cas est le nom de votre utilisateur ou de votre organisation GitHub, `` est le nom de votre subgraph et `<DIRECTORY>< /code> est le nom facultatif du répertoire dans lequel graph init` placera l'exemple de manifeste de subgraph. Le `` est l'adresse de votre contrat existant. `` est le nom du réseau sur lequel le contrat réside. `` est un chemin local vers un fichier ABI de contrat. **`--network`-- et `--abi` -- sont facultatifs.** - -### À partir d'un exemple de subgraph - -Le second mode `graph init` prend en charge est la création d'un nouveau projet à partir d'un exemple de subgraph. La commande suivante le fait : - -``` -graph init --par-exemple --produit-service hébergé / [] -``` - -Le subgraph d'exemple est basé sur le contrat Gravity de Dani Grant qui gère les avatars d'utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. Le subgraph gère ces événements en créant des entités `Gravatar` dans le stockage des nœuds de The Graph et en veillant à ce qu'elles soient mises à jour en fonction des événements. 
Continuez sur le [manifeste de subgraphs](/developing/creating-a-subgraph#the-subgraph-manifest) pour mieux comprendre les événements de vos contrats intelligents auxquels prêter attention, les mappages, etc. - -### À partir d'un contract proxy - -Pour créer un subgraph adapté au suivi d'un contract proxy, initialisez le subgraph en spécifiant l'adresse du contrat de mise en œuvre. Une fois le processus d'initialisation terminé, la dernière étape consiste à mettre à jour le nom du réseau dans le fichier subgraph.yaml à l'adresse d'un contract proxy. Vous pouvez utiliser la commande ci-dessous. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Réseaux pris en charge par le service hébergé - -Vous pouvez trouver la liste des réseaux supportés [ici](/developing/supported-networks). diff --git a/website/pages/fr/deploying/subgraph-studio.mdx b/website/pages/fr/deploying/subgraph-studio.mdx deleted file mode 100644 index d44f3c17c6f6..000000000000 --- a/website/pages/fr/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Bienvenue dans votre nouveau site de Lancement 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Créer un subgraphe via l'interface utilisateur de Studio -- Déployer un subgraphe à l'aide de la CLI -- Publier un subgraph avec l'interface utilisateur de Studio -- Testez-le dans le playground -- Intégrez-le dans l'environnement de préproduction à l'aide de l'URL de requête -- Créez et gérez vos clés API pour des subgraphs spécifiques - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. 
- -Les requêtes des subgraphs génère des frais d'interrogation, utilisés pour récompenser les indexeurs [Indexeurs](/network/indexing) sur le réseau Graph. Si vous êtes un développeur dapp ou un développeur de subgraphs, le Studio vous permettra de créer de meilleurs subgraphs pour alimenter vos subgraphs ou celles de votre communauté. Le Studio est composé de 5 parties principales : - -- Contrôles de votre compte utilisateur -- Une liste de subgraphs que vous avez créés -- Une section pour gérer, afficher les détails et visualiser l'état d'un subgraph spécifique -- Une section pour gérer vos clés API dont vous aurez besoin pour interroger un subgraph -- Une section pour gérer votre facturation - -## Comment créer votre compte - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Dès que vous êtes connecté, vous verrez votre clé de déploiement unique sur la page d'accueil de votre compte. Cela vous permettra soit de publier vos subgraphs, soit de gérer vos clés API + facturation. Vous disposerez d'une clé de déploiement unique qui pourra être regénérée si vous pensez qu'elle a été compromise. - -## Comment créer un subgraph dans Subgraph Studio - - - -## Compatibilité des subgraphs avec le réseau de The Graph - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Indexer un [réseau supporté](/developing/supported-networks) -- Ne doit utiliser aucune des fonctionnalités suivantes : - - ipfs.cat & ipfs.map - - Erreurs non fatales - - La greffe - -Plus de fonctions & de réseaux seront ajoutés progressivement au réseau Graph. - -### Flux du cycle de vie des subgraphs - -![Flux du cycle de vie des subgraphes](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. 
Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Tester votre subgraph dans Subgraph Studio - -Si vous voulez tester votre subgraph avant de le publier sur le réseau, vous pouvez le faire dans le subgraph **Playground (aire de jeu)** ou consulter vos journaux de bord. Les journaux de subgraph vous indiqueront **où** votre subgraph échoue. - -## Publiez votre subgraph dans Subgraph Studio - -Vous êtes arrivé jusqu'ici - félicitations ! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Jetez un œil à l'aperçu vidéo ci-dessous également : - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Les indexeurs doivent soumettre des enregistrements obligatoires de preuve d'indexation à partir d'un hash de bloc spécifique. Parce que la publication d'un subgraph est une action effectuée sur la blockchain, n'oubliez pas que la transaction peut prendre jusqu'à quelques minutes pour être complétée. Toute adresse que vous utilisez pour publier le contrat sera la seule à pouvoir publier les futures versions. 
Choisissez judicieusement ! - -Les subgraphes avec des signaux de curation sont présentés aux indexeurs afin qu'ils puissent être indexés sur le réseau décentralisé. Vous pouvez publier des subgraphs et des signaux en une seule transaction, ce qui vous permet de générer le premier signal de curation sur le subraphe et d'économiser sur les coûts de gaz. En ajoutant votre signal au signal fourni ultérieurement par les curateurs, votre subgraph aura également plus de chances de répondre aux requêtes. - -**Maintenant que vous avez publié votre subgraph, voyons comment vous allez le gérer régulièrement.** Notez que vous ne pouvez pas publier votre subgraph sur le réseau si la synchronisation a échoué. Cela est généralement dû au fait que le subgraphe contient des bogues - les journaux vous indiqueront où se trouvent ces problèmes! - -## Versionner votre subgraph avec la CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. 
- -Veuillez noter qu'il y a des coûts associés à la publication d'une nouvelle version d'un subgraph sur le réseau. En plus des frais de transaction, les développeurs doivent également financer une partie de la taxe de curation sur le signal d'automigration. Vous ne pouvez pas publier une nouvelle version de votre subgraph si les curateurs ne l'ont pas signalé. Pour plus d'informations sur les risques de curation, veuillez en savoir plus [ici](/network/curating). - -### Archivage automatique des versions de subgraphs - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Désarchiver](/img/Unarchive.png) diff --git a/website/pages/fr/developing/creating-a-subgraph.mdx b/website/pages/fr/developing/creating-a-subgraph.mdx deleted file mode 100644 index b3a27f97546e..000000000000 --- a/website/pages/fr/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Comment créer un subgraph ---- - -Un subgraph récupère des données depuis une blockchain, les manipule puis les enregistre afin que ces données soient aisément accessibles via GraphQL. 
- -![Définition d'un subgraph](/img/defining-a-subgraph.png) - -Un subgraph se constitue des fichiers suivants : - -- `subgraph.yaml` : un fichier YAML qui contient le manifeste du subgraph - -- `schema.graphql`: un schéma GraphQL qui définit les données stockées pour votre subgraph et comment les interroger via GraphQL - -- `Mappages AssemblyScript` : [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) qui traduit les données d'événement en entités définies dans votre schéma (par exemple `mapping.ts` dans ce tutoriel) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Installation du Graph CLI - -La CLI Graph est écrite en JavaScript et vous devrez installer soit `yarn` ou `npm` pour l'utiliser ; on suppose que vous avez `yarn` dans ce qui suit. - -Une fois que vous avez `yarn`, installez la CLI Graph en exécutant - -**Installation avec yarn :** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installation avec npm :** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## D'un contrat existant - -La commande suivante crée un subgraph qui indexe tous les événements d'un contrat existant. Il essaie de récupérer l'ABI du contrat via Etherscan et utilise un chemin de fichier local en cas d'échec. Si l'un des arguments facultatifs manque, il vous guide à travers un formulaire interactif. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` est l'ID de votre subgraph dans Subgraph Studio, il peut être trouvé sur la page d'information de votre subgraph. - -## A partir d'un exemple de subgraph - -Le second mode `graph init` prend en charge est la création d'un nouveau projet à partir d'un exemple de subgraph. La commande suivante le fait : - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Ajouter de nouvelles sources de données à un subgraph existant - -Depuis `v0.31.0`, le `graph-cli` prend en charge l'ajout de nouvelles sources de données à un subgraph existant via la commande `graph add`. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -La commande `add` récupérera l'ABI depuis Etherscan (sauf si un chemin ABI est spécifié avec l'option `--abi`) et créera une nouvelle `dataSource` de la même manière que la commande `graph init` crée un `dataSource` `--from-contract`, mettant à jour le schéma et les mappages en conséquence. - -L'option `--merge-entities` identifie la façon dont le développeur souhaite gérer les conflits de noms d'`entité` et d'`événement` : - -- Si `true` : le nouveau `dataSource` doit utiliser les `eventHandlers` & `entités`. -- Si `false` : une nouvelle entité & le gestionnaire d'événements doit être créé avec `${dataSourceName}{EventName}`. - -L'`adresse` du contrat sera écrite dans le `networks.json` du réseau concerné. - -> **Remarque :** Lorsque vous utilisez la Cli interactive, après avoir exécuté avec succès `graph init`, vous serez invité à ajouter une nouvelle `dataSource`. - -## Le manifeste du subgraph - -Le manifeste du subgraph `subgraph.yaml` définit les contrats intelligents que votre subgraph indexe, les événements de ces contrats auxquels prêter attention et comment mapper les données d'événements aux entités que Graph Node stocke et permet d'interroger. La spécification complète des manifestes de subgraphs peut être trouvée [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Pour l'exemple de subgraph, `subgraph.yaml` est : - -```yaml -specVersion: 0.0.4 -description: Gravatar pour Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Les entrées importantes à mettre à jour pour le manifeste sont : - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. 
- -- `fonctionnalités` : une liste de tous les noms de [fonctionnalités](#experimental-features) utilisés. - -- `indexerHints.prune` : Définit la conservation des données de blocs historiques pour un subgraph. Voir [prune](#prune) dans la section [indexerHints](#indexer-hints). - -- `dataSources.source` : l'adresse du contrat intelligent, les sources du sous-graphe, et l'ABI du contrat intelligent à utiliser. L'adresse est facultative ; son omission permet d'indexer les événements correspondants de tous les contrats. - -- `dataSources.source` : l'adresse du contrat intelligent, les sources du subgraph, et l'Abi du contrat intelligent à utiliser. L'adresse est facultative ; son omission permet d'indexer les événements correspondants de tous les contrats. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context` : paires clé-valeur qui peuvent être utilisées dans les mappages de subgraphs. Prend en charge différents types de données comme `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Octets`, `Liste` et `BigInt`. Chaque variable doit spécifier son `type` et ses `données`. Ces variables de contexte sont ensuite accessibles dans les fichiers de mappage, offrant des options plus configurables pour le développement de subgraphs. - -- `dataSources.mapping.entities` : les entités que la source de données écrit dans le magasin. Le schéma de chaque entité est défini dans le fichier schema.graphql. - -- `dataSources.mapping.abis` : un ou plusieurs fichiers ABI nommés pour le contrat source ainsi que tout autre contrat intelligent avec lequel vous interagissez à partir des mappages. - -- `dataSources.mapping.eventHandlers` : répertorie les événements de contrat intelligent auxquels ce subgraph réagit et les gestionnaires du mappage —./src/mapping.ts dans l'exemple qui transforment ces événements en entités dans le magasin. 
- -- `dataSources.mapping.callHandlers` : répertorie les fonctions de contrat intelligent auxquelles ce smubgraph réagit et les gestionnaires du mappage qui transforment les entrées et sorties en appels de fonction en entités dans le magasin. - -- `dataSources.mapping.blockHandlers` : répertorie les blocs auxquels ce subgraph réagit et les gestionnaires du mappage à exécuter lorsqu'un bloc est ajouté à la chaîne. Sans filtre, le gestionnaire de bloc sera exécuté à chaque bloc. Un filtre d'appel facultatif peut être fourni en ajoutant un champ `filter` avec `kind: call` au gestionnaire. Cela n'exécutera le gestionnaire que si le bloc contient au moins un appel au contrat de source de données. - -Un seul subgraph peut indexer les données de plusieurs contrats intelligents. Ajoutez une entrée pour chaque contrat à partir duquel les données doivent être indexées dans le tableau `dataSources`. - -### Ordre de déclenchement des gestionnaires - -Les déclencheurs d'une source de données au sein d'un bloc sont classés à l'aide du processus suivant : - -1. Les déclencheurs d'événements et d'appels sont d'abord classés par index de transaction au sein du bloc. -2. Les déclencheurs d'événements et d'appels au sein d'une même transaction sont classés selon une convention : les déclencheurs d'événements d'abord, puis les déclencheurs d'appel, chaque type respectant l'ordre dans lequel ils sont définis dans le manifeste. -3. Les déclencheurs de bloc sont exécutés après les déclencheurs d'événement et d'appel, dans l'ordre dans lequel ils sont définis dans le manifeste. - -Ces règles de commande sont susceptibles de changer. 
- -> **Note:** Lorsque de nouveaux [sources de données dynamiques](#data-source-templates-for-dynamically-created-contracts) sont créés, les gestionnaires définis pour les sources de données dynamiques ne commenceront à être traités qu'une fois que tous les gestionnaires de sources de données existants auront été traités, et se répéteront dans la même séquence chaque fois qu'ils seront déclenchés. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
- -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
- -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. 
-- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. 
- -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Notes de version | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. 
| -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Obtenir les ABI - -Le(s) fichier(s) ABI doivent correspondre à votre(vos) contrat(s). Il existe plusieurs façons d'obtenir des fichiers ABI : - -- Si vous construisez votre propre projet, vous aurez probablement accès à vos ABI les plus récents. -- Si vous créez un subgraph pour un projet public, vous pouvez télécharger ce projet sur votre ordinateur et obtenir l'ABI en utilisant la [`compilation truffle `](https://truffleframework.com/docs/truffle/overview) ou en utilisant solc pour compiler. -- Vous pouvez également trouver l'ABI sur [Etherscan](https://etherscan.io/), mais ce n'est pas toujours fiable, car l'ABI qui y est téléchargé peut être obsolète. Assurez-vous d'avoir le bon ABI, sinon l'exécution de votre subgraph échouera. - -## Le Schema GraphQL - -Le schéma de votre subgraph se trouve dans le fichier `schema.graphql`. Les schémas GraphQL sont définis à l'aide du langage de définition d'interface GraphQL. Si vous n'avez jamais écrit de schéma GraphQL, il est recommandé de consulter cette introduction sur le système de types GraphQL. La documentation de référence pour les schémas GraphQL est disponible dans la section [API GraphQL](/querying/graphql-api). - -## Définir des entités - -Avant de définir des entités, il est important de prendre du recul et de réfléchir à la manière dont vos données sont structurées et liées. Toutes les requêtes seront effectuées sur le modèle de données défini dans le schéma du subgraph et les entités indexées par le subgraph. Pour cette raison, il est bon de définir le schéma du subgraph d'une manière qui correspond aux besoins de votre dapp. Il peut être utile d'imaginer les entités comme des « objets contenant des données », plutôt que comme des événements ou des fonctions. 
- -Avec The Graph, vous définissez simplement les types d'entités dans `schema.graphql`, et Graph Node générera des champs de niveau supérieur pour interroger des instances uniques et des collections de ce type d'entité. Chaque type qui doit être une entité doit être annoté avec une directive `@entity`. Par défaut, les entités sont mutables, ce qui signifie que les mappages peuvent charger des entités existantes, les modifier et stocker une nouvelle version de cette entité. La mutabilité a un prix, et pour les types d'entités dont on sait qu'elles ne seront jamais modifiées, par exemple parce qu'elles contiennent simplement des données extraites textuellement de la chaîne, il est recommandé de les marquer comme immuables avec `@entity (immuable : vrai)`. Les mappages peuvent apporter des modifications aux entités immuables tant que ces modifications se produisent dans le même bloc dans lequel l'entité a été créée. Les entités immuables sont beaucoup plus rapides à écrire et à interroger et doivent donc être utilisées autant que possible. - -### Bon exemple - -L'entité `Gravatar` ci-dessous est structurée autour d'un objet Gravatar et constitue un bon exemple de la façon dont une entité pourrait être définie. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Mauvais exemple - -Les exemples d'entités `GravatarAccepted` et `GravatarDeclined` ci-dessous sont basés sur des événements. Il n'est pas recommandé de mapper des événements ou des appels de fonction à des entités 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Champs facultatifs et obligatoires - -Les champs d'entité peuvent être définis comme obligatoires ou facultatifs. 
Les champs obligatoires sont indiqués par le `!` dans le schéma. Si un champ obligatoire n'est pas défini dans le mappage, vous recevrez cette erreur lors de l'interrogation du champ : - -``` -Null value resolved for non-null field 'name' -``` - -Chaque entité doit avoir un champ `id`, qui doit être de type `Bytes!` ou `String!`. Il est généralement recommandé d'utiliser `Bytes!`, à moins que l'`id` ne contienne du texte lisible par l'homme, car les entités avec des identifiants `Bytes!` seront plus rapides à écrire et à interroger que celles avec un `id` de type `String!`. Le champ `id` sert de clé primaire et doit être unique parmi toutes les entités du même type. Pour des raisons historiques, le type `ID!` est également accepté et est synonyme de `String!`. - -Pour certains types d'entités, l'`id` est construit à partir des identifiants de deux autres entités ; cela est possible en utilisant `concat`, par exemple `let id = left.id.concat(right.id)` pour former l'identifiant à partir des identifiants de `left` et `right`. De même, pour construire un identifiant à partir de l'identifiant d'une entité existante et d'un compteur `count`, `let id = left.id.concatI32(count)` peut être utilisé. La concaténation est garantie pour produire des identifiants uniques tant que la longueur de `left` est la même pour toutes ces entités, par exemple, parce que `left.id` est une `Address`. - -### Types scalaires intégrés - -#### Scalaires pris en charge par GraphQL - -Nous prenons en charge les scalaires suivants dans notre API GraphQL : - -| Type | Description | -| --- | --- | -| `Bytes` | Tableau d'octets, représenté sous forme de chaîne hexadécimale. Couramment utilisé pour les hachages et adresses Ethereum. | -| `String` | Scalaire pour les valeurs `string`. Les caractères nuls ne sont pas pris en charge et sont automatiquement supprimés. | -| `Boolean` | Scalar pour `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Grands entiers. Utilisé pour les types `uint32`, `int64`, `uint64`, ..., `uint256` d'Ethereum. Remarque : Tout ce qui se trouve en dessous de `uint32`, tel que `int32`, `uint24` ou `int8`, est représenté par `i32 et bio`. Accédez à [API GraphQL - Requêtes](/querying/graphql-api#queries) pour une description de l'API de recherche en texte intégral et d'autres exemples d'utilisation. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Gestion des fonctionnalités](#experimental-features) :** À partir de `specVersion` `0.0.4` et au-delà, `fullTextSearch` doit être déclaré sous la section `fonctionnalités` dans le manifeste du subgraph. - -### Langues prises en charge - -Le choix d'une langue différente aura un effet définitif, bien que parfois subtil, sur l'API de recherche en texte intégral. Les champs couverts par un champ de requête en texte intégral sont examinés dans le contexte de la langue choisie, de sorte que les lexèmes produits par les requêtes d'analyse et de recherche varient d'une langue à l'autre. Par exemple : lorsque vous utilisez le dictionnaire turc pris en charge, "token" est dérivé de "toke", tandis que, bien sûr, le dictionnaire anglais le dérivera de "token". 
- -Dictionnaires de langues pris en charge : - -| Code | Dictionnaire | -| ------ | ------------ | -| simple | Général | -| da | Danois | -| nl | Néerlandais | -| en | Anglais | -| fi | Finlandais | -| fr | Français | -| de | Allemand | -| hu | Hongrois | -| it | Italien | -| no | Norvégien | -| pt | Portugais | -| ro | Roumain | -| ru | Russe | -| es | Espagnol | -| sv | Suédois | -| tr | Turc | - -### Algorithmes de classement - -Algorithmes de classement: - -| Algorithme | Description | -| --- | --- | -| rang | Utilisez la qualité de correspondance (0-1) de la requête en texte intégral pour trier les résultats. | -| proximitéRang | Similaire au classement mais inclut également la proximité des matchs. | - -## Écriture de mappages - -Les mappages prennent les données d'une source particulière et les transforment en entités définies dans votre schéma. Les mappages sont écrits dans un sous-ensemble de [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) appelé [AssemblyScript](https : //github.com/AssemblyScript/assemblyscript/wiki) qui peut être compilé en WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript est plus strict que TypeScript normal, mais fournit une syntaxe familière. - -Pour chaque gestionnaire d'événements défini dans `subgraph.yaml` sous `mapping.eventHandlers`, créez une fonction exportée du même nom. Chaque gestionnaire doit accepter un seul paramètre appelé `event` avec un type correspondant au nom de l'événement qui est géré. 
- -Dans le subgraph d'exemple, `src/mapping.ts` contient des gestionnaires pour les événements `NewGravatar` et `UpdatedGravatar` : - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -Le premier gestionnaire prend un événement `NewGravatar` et crée une nouvelle entité `Gravatar` avec `new Gravatar(event.params.id.toHex())`, remplissant les champs d'entité en utilisant les paramètres d'événement correspondants. Cette instance d'entité est représentée par la variable `gravatar`, avec une valeur d'identifiant de `event.params.id.toHex()`. - -Le deuxième gestionnaire essaie de charger le `Gravatar` existant à partir du magasin Graph Node. S'il n'existe pas encore, il est créé à la demande. L'entité est ensuite mise à jour pour correspondre aux nouveaux paramètres d'événement avant d'être réenregistrée dans le magasin à l'aide de `gravatar.save()`. - -### ID recommandés pour la création de nouvelles entités - -Il est fortement recommandé d'utiliser `Bytes` pour les champs `id` et de n'utiliser `String` que pour les attributs qui contiennent vraiment du texte lisible par l'homme, comme le nom d'un jeton. Vous trouverez ci-dessous quelques valeurs de `id` à prendre en compte lors de la création de nouvelles entités. - -- `transfer.id = événement.transaction. 
hachage` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Traitement des entités ayant des identifiants identiques - -Lors de la création et de l'enregistrement d'une nouvelle entité, si une entité avec le même ID existe déjà, les propriétés de la nouvelle entité sont toujours préférées lors du processus de fusion. Cela signifie que l'entité existante sera mise à jour avec les valeurs de la nouvelle entité. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -Si aucune valeur n'est définie pour un champ de la nouvelle entité avec le même ID, le champ aura également la valeur null. - -## Génération de code - -Afin de faciliter et de sécuriser le travail avec les contrats intelligents, les événements et les entités, la CLI Graph peut générer des types AssemblyScript à partir du schéma GraphQL du subgraph et des ABI de contrat inclus dans les sources de données. 
- -Cela se fait avec - -```sh -graph codegen [--output-dir <OUTPUT_DIR>] [<MANIFEST>] -``` - -mais dans la plupart des cas, les subgraphs sont déjà préconfigurés via `package.json` pour vous permettre d'exécuter simplement l'une des opérations suivantes pour obtenir le même résultat : - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Cela générera une classe AssemblyScript pour chaque contrat intelligent dans les fichiers ABI mentionnés dans `subgraph.yaml`, vous permettant de lier ces contrats à des adresses spécifiques dans les mappages et d'appeler des méthodes de contrat en lecture seule sur le bloc en cours de traitement. Il générera également une classe pour chaque événement de contrat afin de fournir un accès facile aux paramètres de l'événement, ainsi qu'au bloc et à la transaction d'où provient l'événement. Tous ces types sont écrits dans `<OUTPUT_DIR>/<DATA_SOURCE_NAME>/<ABI_NAME>.ts`. Dans le subgraph d'exemple, ce serait `generated/Gravity/Gravity.ts`, permettant aux mappages d'importer ces types avec : - -```javascript -import { - // La classe de contrat : - Gravity, - // Les classes d'événements : - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -De plus, une classe est générée pour chaque type d'entité dans le schéma GraphQL du subgraph. Ces classes fournissent un chargement d'entités de type sécurisé, un accès en lecture et en écriture aux champs d'entité ainsi qu'une méthode `save()` pour écrire les entités à stocker. Toutes les classes d'entités sont écrites dans `<OUTPUT_DIR>/schema.ts`, permettant aux mappages de les importer avec - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Remarque :** La génération de code doit être effectuée à nouveau après chaque modification du schéma GraphQL ou des ABI inclus dans le manifeste. Elle doit également être effectuée au moins une fois avant de construire ou de déployer le subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Modèles de sources de données - -Un modèle courant dans les contrats intelligents compatibles EVM est l'utilisation de contrats de registre ou d'usine, dans lesquels un contrat crée, gère ou référence un nombre arbitraire d'autres contrats qui ont chacun leur propre état et leurs propres événements. - -Les adresses de ces sous-traitants peuvent ou non être connues à l'avance et bon nombre de ces contrats peuvent être créés et/ou ajoutés au fil du temps. C'est pourquoi, dans de tels cas, définir une seule source de données ou un nombre fixe de sources de données est impossible et une approche plus dynamique est nécessaire : des _modèles de sources de données_. - -### Source de données pour le contrat principal - -Tout d’abord, vous définissez une source de données régulière pour le contrat principal. L'extrait ci-dessous montre un exemple simplifié de source de données pour le contrat d'usine d'échange [Uniswap](https://uniswap.org). Notez le gestionnaire d'événements `NewExchange(address,address)`. Ceci est émis lorsqu'un nouveau contrat d'échange est créé en chaîne par le contrat d'usine. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Modèles de source de données pour les contrats créés dynamiquement - -Ensuite, vous ajoutez des _modèles de source de données_ au manifeste. 
Celles-ci sont identiques aux sources de données classiques, sauf qu'il leur manque une adresse de contrat prédéfinie sous `source`. Généralement, vous définirez un modèle pour chaque type de sous-contrat géré ou référencé par le contrat parent. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instanciation d'un modèle de source de données - -Dans la dernière étape, vous mettez à jour votre mappage de contrat principal pour créer une instance de source de données dynamique à partir de l'un des modèles. Dans cet exemple, vous modifieriez le mappage de contrat principal pour importer le modèle `Exchange` et appeleriez la méthode `Exchange.create(address)` dessus pour commencer à indexer le nouveau contrat d'échange. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Commence à indexer l'échange ; `event.params.exchange` est le - // adresse du nouveau contrat d'échange - Exchange.create(event.params.exchange) -} -``` - -> **Remarque :** Une nouvelle source de données traitera uniquement les appels et les événements du bloc dans lequel elle a été créée et de tous les blocs suivants, mais ne traitera pas les données historiques, c'est-à-dire les données. 
qui est contenu dans les blocs précédents. -> -> Si les blocs précédents contiennent des données pertinentes pour la nouvelle source de données, il est préférable d'indexer ces données en lisant l'état actuel du contrat et en créant des entités représentant cet état au moment de la création de la nouvelle source de données. - -### Data Source Context - -Les contextes de source de données permettent de transmettre une configuration supplémentaire lors de l'instanciation d'un modèle. Dans notre exemple, disons que les échanges sont associés à une paire de transactions particulière, qui est incluse dans l'événement `NewExchange`. Ces informations peuvent être transmises à la source de données instanciée, comme suit : - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -A l'intérieur d'un mappage du modèle `Exchange`, le contexte est alors accessible : - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Il existe des setters et des getters comme `setString` et `getString` pour tous les types de valeur. - -## Blocs de démarrage - -Le `startBlock` est un paramètre facultatif qui vous permet de définir à partir de quel bloc de la chaîne la source de données commencera l'indexation. La définition du bloc de départ permet à la source de données d'ignorer potentiellement des millions de blocs non pertinents. En règle générale, un développeur de subgraphs définira `startBlock` sur le bloc dans lequel le contrat intelligent de la source de données a été créé. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Remarque :** Le bloc de création de contrat peut être rapidement consulté sur Etherscan : -> -> 1. Recherchez le contrat en saisissant son adresse dans la barre de recherche. -> 2. Cliquez sur le hachage de la transaction de création dans la section `Contract Creator`. -> 3. Chargez la page des détails de la transaction où vous trouverez le bloc de départ de ce contrat. - -## Conseils pour l'indexeur - -Le paramètre `indexerHints` dans le manifeste d'un subgraph fournit des directives aux indexeurs sur le traitement et la gestion d'un subgraph. Il influence les décisions opérationnelles concernant la gestion des données, les stratégies d'indexation et les optimisations. Actuellement, il propose l'option `prune` pour gérer la conservation ou l'élagage des données historiques. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune` : définit la conservation des données de bloc historiques pour un subgraph. Les options incluent : - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. Un nombre spécifique : Fixe une limite personnalisée au nombre de blocs historiques à conserver. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
- -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> L'utilisation de `"auto"` est généralement recommandée car elle maximise les performances des requêtes et est suffisante pour la plupart des utilisateurs qui n'ont pas besoin d'accéder à de nombreuses données historiques. - -Pour les subgraphs exploitant les [requêtes de voyage dans le temps](/querying/graphql-api/#time-travel-queries), il est conseillé soit de définir un nombre spécifique de blocs pour la conservation des données historiques, soit d'utiliser ` prune : never` pour conserver tous les états historiques des entités. 
Vous trouverez ci-dessous des exemples de configuration des deux options dans les paramètres de votre subgraph : - -Pour conserver une quantité spécifique de données historiques : - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -Préserver l'histoire complète des États de l'entité : - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Gestionnaires d'appels - -Bien que les événements constituent un moyen efficace de collecter les modifications pertinentes apportées à l'état d'un contrat, de nombreux contrats évitent de générer des journaux pour optimiser les coûts du gaz. Dans ces cas, un subgraph peut s'abonner aux appels effectués vers le contrat de source de données. Ceci est réalisé en définissant des gestionnaires d'appels faisant référence à la signature de la fonction et au gestionnaire de mappage qui traitera les appels à cette fonction. Pour traiter ces appels, le gestionnaire de mappage recevra un `ethereum.Call` comme argument avec les entrées et sorties saisies de l'appel. Les appels effectués à n'importe quelle profondeur dans la chaîne d'appels d'une transaction déclencheront le mappage, permettant de capturer l'activité avec le contrat de source de données via des contrats proxy. - -Les gestionnaires d'appels ne se déclencheront que dans l'un des deux cas suivants : lorsque la fonction spécifiée est appelée par un compte autre que le contrat lui-même ou lorsqu'elle est marquée comme externe dans Solidity et appelée dans le cadre d'une autre fonction du même contrat. - -> **Remarque :** Les gestionnaires d'appels dépendent actuellement de l'API de suivi de parité. 
Certains réseaux, tels que la chaîne BNB et Arbitrum, ne prennent pas en charge cette API. Si un subgraph indexant l’un de ces réseaux contient un ou plusieurs gestionnaires d’appels, il ne démarrera pas la synchronisation. Les développeurs de subgraphs devraient plutôt utiliser des gestionnaires d'événements. Ceux-ci sont bien plus performants que les gestionnaires d'appels et sont pris en charge sur tous les réseaux EVM. - -### Définir un gestionnaire d'appels - -Pour définir un gestionnaire d'appels dans votre manifeste, ajoutez simplement un tableau `callHandlers` sous la source de données à laquelle vous souhaitez vous abonner. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -La propriété `function` est la signature de fonction normalisée permettant de filtrer les appels. La propriété `handler` est le nom de la fonction de votre mappage que vous souhaitez exécuter lorsque la fonction cible est appelée dans le contrat de source de données. - -### Fonction de cartographie - -Chaque gestionnaire d'appel prend un seul paramètre dont le type correspond au nom de la fonction appelée. 
Dans l'exemple de subgraph ci-dessus, le mappage contient un gestionnaire lorsque la fonction `createGravatar` est appelée et reçoit un paramètre `CreateGravatarCall` comme argument : - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -La fonction `handleCreateGravatar` prend un nouveau `CreateGravatarCall` qui est une sous-classe de `ethereum. Call`, fournie par `@graphprotocol/graph-ts`, qui inclut les entrées et sorties saisies de l’appel. Le type `CreateGravatarCall` est généré pour vous lorsque vous exécutez `graph codegen`. - -## Block Handlers - -En plus de s'abonner à des événements de contrat ou à des appels de fonction, un subgraph peut souhaiter mettre à jour ses données à mesure que de nouveaux blocs sont ajoutés à la chaîne. Pour y parvenir, un subgraph peut exécuter une fonction après chaque bloc ou après des blocs correspondant à un filtre prédéfini. - -### Filtres pris en charge - -#### Filtre d'appel - -```yaml -filter: - kind: call -``` - -_Le gestionnaire défini sera appelé une fois pour chaque bloc contenant un appel au contrat (source de données) sous lequel le gestionnaire est défini._ - -> **Remarque :** Le filtre `call` dépend actuellement de l'API de traçage de parité. Certains réseaux, tels que la chaîne BNB et Arbitrum, ne prennent pas en charge cette API. Si un subgraph indexant l'un de ces réseaux contient un ou plusieurs gestionnaires de blocs avec un filtre `call`, il ne démarrera pas la synchronisation. - -L'absence de filtre pour un gestionnaire de bloc garantira que le gestionnaire est appelé à chaque bloc. 
Une source de données ne peut contenir qu'un seul gestionnaire de bloc pour chaque type de filtre. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Filtre d'interrogation - -> **Nécessite `specVersion` >= 0.0.8** - -> **Remarque :** Les filtres d'interrogation ne sont disponibles que sur les sources de données de `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Le gestionnaire défini sera appelé une fois tous les `n` blocs, où `n` est la valeur fournie dans le champ `every`. Cette configuration permet au subgraph d'effectuer des opérations spécifiques à intervalles réguliers. - -#### Le filtre Once - -> **Nécessite `specVersion` >= 0.0.8** - -> **Remarque :** Les filtres Once ne sont disponibles que sur les sources de données de `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Le gestionnaire défini avec le filtre once ne sera appelé qu'une seule fois avant l'exécution de tous les autres gestionnaires. Cette configuration permet au subgraph d'utiliser le gestionnaire comme gestionnaire d'initialisation, effectuant des tâches spécifiques au début de l'indexation. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Fonction de cartographie - -La fonction de mappage recevra un `ethereum.Block` comme seul argument. 
Comme les fonctions de mappage pour les événements, cette fonction peut accéder aux entités de subgraphs existantes dans le magasin, appeler des contrats intelligents et créer ou mettre à jour des entités. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Événements anonymes - -Si vous devez traiter des événements anonymes dans Solidity, cela peut être réalisé en fournissant le sujet 0 de l'événement, comme dans l'exemple : - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Un événement ne sera déclenché que lorsque la signature et le sujet 0 correspondent. Par défaut, `topic0` est égal au hachage de la signature de l'événement. - -## Reçus de transaction dans les gestionnaires d'événements - -À partir de `specVersion` `0.0.5` et `apiVersion` `0.0.7`, les gestionnaires d'événements peuvent avoir accès au reçu du transaction qui les a émis. - -Pour ce faire, les gestionnaires d'événements doivent être déclarés dans le manifeste du subgraph avec la nouvelle clé `receipt: true`, qui est facultative et vaut par défaut false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Dans la fonction de gestionnaire, le reçu est accessible dans le champ `Event.receipt`. Lorsque la clé `receipt` est définie sur `false` ou omise dans le manifeste, une valeur `null` sera renvoyée à la place. 
- -## Fonctionnalités expérimentales - -À partir de `specVersion` `0.0.4`, les fonctionnalités de subgraph doivent être explicitement déclarées dans la section `features` au niveau supérieur du fichier manifeste, en utilisant leur `camelCase`, comme indiqué dans le tableau ci-dessous : - -| Fonctionnalité | Nom | -| --------------------------------------------------------------- | ------------------- | -| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` | -| [Recherche en texte intégral](#defining-fulltext-search-fields) | `fullTextSearch` | -| [La greffe](#grafting-onto-existing-subgraphs) | `grafting` | - -Par exemple, si un subgraph utilise les fonctionnalités **Recherche en texte intégral** et **Erreurs non fatales**, le `features` dans le manifeste doit être : - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Notez que l'utilisation d'une fonctionnalité sans la déclarer entraînera une **erreur de validation** lors du déploiement du subgraph, mais aucune erreur ne se produira si une fonctionnalité est déclarée mais n'est pas utilisée. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! 
@aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Erreurs non fatales - -Les erreurs d'indexation sur les subgraphs déjà synchronisés entraîneront, par défaut, l'échec du subgraph et l'arrêt de la synchronisation. 
Les subgraphs peuvent également être configurés pour continuer la synchronisation en présence d'erreurs, en ignorant les modifications apportées par le gestionnaire qui a provoqué l'erreur. Cela donne aux auteurs de subgraphs le temps de corriger leurs subgraphs pendant que les requêtes continuent d'être traitées sur le dernier bloc, bien que les résultats puissent être incohérents en raison du bogue à l'origine de l'erreur. Notez que certaines erreurs sont toujours fatales. Pour être non fatale, l'erreur doit être connue pour être déterministe. - -> **Remarque :** Le réseau Graph ne prend pas encore en charge les erreurs non fatales et les développeurs ne doivent pas déployer de subgraphs utilisant cette fonctionnalité sur le réseau via le Studio. - -L'activation des erreurs non fatales nécessite la définition de l'indicateur de fonctionnalité suivant sur le manifeste du subgraph : - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -La requête doit également choisir d'interroger les données présentant des incohérences potentielles via l'argument `subgraphError`. Il est également recommandé d'interroger `_meta` pour vérifier si le subgraph a ignoré des erreurs, comme dans l'exemple : - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Si le subgraph rencontre une erreur, cette requête renverra à la fois les données et une erreur graphql avec le message `"indexing_error"`, comme dans cet exemple de réponse : - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Greffe sur des subgraphs existants - -> **Remarque :** il n'est pas recommandé d'utiliser le greffage lors de la mise à niveau initiale vers The Graph Network. Apprenez-en plus [ici](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -Lorsqu'un subgraph est déployé pour la première fois, il commence à indexer les événements au niveau du bloc Genesis de la chaîne correspondante (ou au `startBlock` défini avec chaque source de données). Dans certaines circonstances, il est avantageux de réutiliser les données d'un subgraph existant et de commencer l'indexation à un bloc beaucoup plus tard. Ce mode d'indexation est appelé _Grafting_. Le greffage est, par exemple, utile pendant le développement pour surmonter rapidement de simples erreurs dans les mappages ou pour faire fonctionner à nouveau temporairement un subgraph existant après son échec. - -Un subgraph est greffé sur un subgraph de base lorsque le manifeste du subgraph dans `subgraph.yaml` contient un bloc `graft` au niveau supérieur : - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Lorsqu'un subgraph dont le manifeste contient un bloc `graft` est déployé, Graph Node copiera les données du subgraph `base` jusqu'au `block` donné inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc. Le subgraph de base doit exister sur l'instance Graph Node cible et doit avoir été indexé jusqu'au moins au bloc donné. En raison de cette restriction, le greffage ne doit être utilisé que pendant le développement ou en cas d'urgence pour accélérer la production d'un subgraph équivalent non greffé. - -Étant donné que le greffage copie les données de base plutôt que de les indexer, il est beaucoup plus rapide d'amener le subgraph au bloc souhaité que l'indexation à partir de zéro, bien que la copie initiale des données puisse encore prendre plusieurs heures pour de très gros subgraphs. Pendant l'initialisation du subgraph greffé, Graph Node enregistrera des informations sur les types d'entités qui ont déjà été copiés. 
- -Le subgraph greffé peut utiliser un schéma GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec celui-ci. Il doit s'agir d'un schéma de subgraph valide à part entière, mais il peut s'écarter du schéma du subgraph de base des manières suivantes : - -- Il ajoute ou supprime des types d'entités -- Il supprime les attributs des types d'entités -- Il ajoute des attributs nullables aux types d'entités -- Il transforme les attributs non nullables en attributs nullables -- Il ajoute des valeurs aux énumérations -- Il ajoute ou supprime des interfaces -- Il change les types d'entités pour lesquels une interface est implémentée - -> **[Gestion des fonctionnalités](#experimental-features) :** `grafting` doit être déclaré sous `features` dans le manifeste du subgraph. - -## IPFS/Arweave File Data Sources - -Les sources de données de fichiers sont une nouvelle fonctionnalité de subgraph permettant d'accéder aux données hors chaîne pendant l'indexation de manière robuste et extensible. Les sources de données de fichiers prennent en charge la récupération de fichiers depuis IPFS et Arweave. - -> Cela jette également les bases d’une indexation déterministe des données hors chaîne, ainsi que de l’introduction potentielle de données arbitraires provenant de HTTP. - -### Aperçu - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. 
- -> Cela remplace l'API `ipfs.cat` existante - -### Guide de mise à niveau - -#### Mettre à jour `graph-ts` et `graph-cli` - -Les sources de données de fichiers nécessitent graph-ts >=0.29.0 et graph-cli >=0.33.1 - -#### Ajouter un nouveau type d'entité qui sera mis à jour lorsque des fichiers seront trouvés - -Les sources de données de fichier ne peuvent pas accéder ni mettre à jour les entités basées sur une chaîne, mais doivent mettre à jour les entités spécifiques au fichier. - -Cela peut impliquer de diviser les champs des entités existantes en entités distinctes, liées entre elles. - -Entité combinée d'origine : - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Nouvelle entité scindée : - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Si la relation est 1:1 entre l'entité parent et l'entité de source de données de fichier résultante, le modèle le plus simple consiste à lier l'entité parent à une entité de fichier résultante en utilisant le CID IPFS comme recherche. Contactez Discord si vous rencontrez des difficultés pour modéliser vos nouvelles entités basées sur des fichiers ! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Ajoutez une nouvelle source de données modélisée avec `kind: file/ipfs` ou `kind: file/arweave` - -Il s'agit de la source de données qui sera générée lorsqu'un fichier d'intérêt est identifié. 
- -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Actuellement, les `abis` sont requis, bien qu'il ne soit pas possible d'appeler des contrats à partir de sources de données de fichiers - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Créer un nouveau gestionnaire pour traiter les fichiers - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -Le CID du fichier sous forme de chaîne lisible est accessible via `dataSource` comme suit : - -```typescript -const cid = dataSource.stringParam() -``` - -Exemple de gestionnaire : - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Générer des sources de données de fichiers si nécessaire - -Vous pouvez désormais créer des sources de données de fichiers 
lors de l'exécution de gestionnaires basés sur une chaîne : - -- Importez le modèle à partir des `modèles` générés automatiquement -- appeler `TemplateName.create(cid : string)` à partir d'un mappage, où le cid est un identifiant de contenu valide pour IPFS ou Arweave - -Pour IPFS, Graph Node prend en charge les identifiants de contenu [v0 et v1](https://docs.ipfs.tech/concepts/content-addressing/), ainsi que les identifiants de contenu avec des répertoires (par exemple `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Exemple: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Cet exemple de code concerne un sous-graphe de Crypto coven. Le hachage ipfs ci-dessus est un répertoire contenant les métadonnées des jetons pour toutes les NFT de l'alliance cryptographique. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //Ceci crée un chemin vers les métadonnées pour un seul Crypto coven NFT. 
Il concatène le répertoire avec "/" + nom de fichier + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Cela créera une nouvelle source de données de fichier, qui interrogera le point d'extrémité IPFS ou Arweave configuré du nœud de graphique, en réessayant si elle n'est pas trouvée. Lorsque le fichier est trouvé, le gestionnaire de la source de données de fichier est exécuté. - -Cet exemple utilise le CID comme recherche entre l'entité `Token` parent et l'entité `TokenMetadata` résultante. - -> Auparavant, c'est à ce stade qu'un développeur de subgraphs aurait appelé `ipfs.cat(CID)` pour récupérer le fichier - -Félicitations, vous utilisez des sources de données de fichiers ! - -#### Déployer vos subgraphs - -Vous pouvez maintenant `construire` et `déployer` votre subgraph sur n'importe quel nœud de graph >=v0.30.0-rc.0. - -#### Limitations - -Les entités et les gestionnaires de sources de données de fichiers sont isolés des autres entités du subgraph, ce qui garantit que leur exécution est déterministe et qu'il n'y a pas de contamination des sources de données basées sur des chaînes. Pour être plus précis : - -- Les entités créées par les sources de données de fichiers sont immuables et ne peuvent pas être mises à jour -- Les gestionnaires de sources de données de fichiers ne peuvent pas accéder à des entités provenant d'autres sources de données de fichiers -- Les entités associées aux sources de données de fichiers ne sont pas accessibles aux gestionnaires basés sur des chaînes - -> Cette contrainte ne devrait pas poser de problème pour la plupart des cas d'utilisation, mais elle peut en compliquer certains. N'hésitez pas à nous contacter via Discord si vous rencontrez des problèmes pour modéliser vos données basées sur des fichiers dans un subgraph ! 
- -En outre, il n'est pas possible de créer des sources de données à partir d'une source de données de fichier, qu'il s'agisse d'une source de données onchain ou d'une autre source de données de fichier. Cette restriction pourrait être levée à l'avenir. - -#### Meilleures pratiques - -Si vous liez des métadonnées NFT aux jetons correspondants, utilisez le hachage IPFS des métadonnées pour référencer une entité Metadata à partir de l'entité Token. Enregistrez l'entité Metadata en utilisant le hachage IPFS comme identifiant. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -Si vous avez des entités qui sont actualisées plusieurs fois, créez des entités uniques basées sur des fichiers en utilisant le hachage & IPFS ; l'ID de l'entité, et référencez-les en utilisant un champ dérivé dans l'entité basée sur la chaîne. - -> Nous travaillons à l'amélioration de la recommandation ci-dessus, afin que les requêtes ne renvoient que la version "la plus récente" - -#### Problèmes connus - -Les sources de données de fichiers nécessitent actuellement des ABI, même si les ABI ne sont pas utilisées ([problème](https://github.com/graphprotocol/graph-cli/issues/961)). La solution consiste à ajouter n'importe quel ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. 
- -#### Exemples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Les Références - -[Sources de données du fichier GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/fr/developing/creating-a-subgraph/_meta.js b/website/pages/fr/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/fr/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/fr/developing/graph-ts/_meta.js b/website/pages/fr/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/fr/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/fr/developing/unit-testing-framework.mdx b/website/pages/fr/developing/unit-testing-framework.mdx index b3c5e2dde822..5fa8e45fb1b2 100644 --- a/website/pages/fr/developing/unit-testing-framework.mdx +++ b/website/pages/fr/developing/unit-testing-framework.mdx @@ -1157,7 +1157,6 @@ De même que les sources de données dynamiques de contrat, les utilisateurs peu ##### Exemple `subgraph.yaml` ```yaml - --- templates: - kind: file/ipfs diff --git a/website/pages/fr/managing/deprecate-a-subgraph.mdx b/website/pages/fr/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/fr/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. 
Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/fr/mips-faqs.mdx b/website/pages/fr/mips-faqs.mdx deleted file mode 100644 index 7276003edb79..000000000000 --- a/website/pages/fr/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Présentation - -> Remarque : le programme MIPs est fermé depuis mai 2023. Merci à tous les indexeurs qui ont participé ! - -C'est une période passionnante pour participer à l'écosystème The Graph ! Lors du [Graph Day 2022](https://thegraph.com/graph-day/2022/), Yaniv Tal a annoncé la [cessation du service hébergé](https://thegraph.com/blog/sunsetting-hosted-service/), un moment vers lequel l’écosystème Graph travaille depuis de nombreuses années. - -Pour prendre en charge la cessation du service hébergé et la migration de toutes ses activités vers le réseau décentralisé, la Graph Foundation a annoncé le \[programme de fournisseurs d'infrastructures de migration (MIP)(https://thegraph.com/blog/mips-multi -programme-d'incitation-à-indexation-en-chaîne). 
- -Le programme MIPs est un programme d'incitation destiné aux indexeurs pour les soutenir avec des ressources pour indexer les chaînes au-delà du mainnet Ethereum et aider le protocole The Graph à étendre le réseau décentralisé en une couche d'infrastructure multi-chaînes. - -Le programme MIPs a alloué 0,75 % de l'offre de GRT (75 millions de GRT), dont 0,5 % pour récompenser les indexeurs qui contribuent au démarrage du réseau et 0,25 % alloués aux subventions de réseau pour les développeurs de sous-graphes utilisant des subgraphs multi-chaînes. - -### Ressources utiles - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [Comment devenir un indexeur efficace sur le réseau Graph](https://thegraph.com/blog/how-to-become-indexer/) -- [Centre de connaissances de l'indexeur](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Est-il possible de générer une preuve d'indexation (POI) valide même si un subgraph a échoué ? - -Oui, c'est effectivement le cas. . - -Pour le contexte, la charte d'arbitrage, [en savoir plus sur la charte ici](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), précise la méthodologie de génération d'un POI pour un subgraph défaillant. - -Un membre de la communauté, [SunTzu](https://github.com/suntzu93), a créé un script pour automatiser ce processus conformément à la méthodologie de la charte d'arbitrage. Consultez le dépôt [ici](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Quelle chaîne le programme MIPs encouragera-t-il en premier ? - -La première chaîne qui sera prise en charge sur le réseau décentralisé est Gnosis Chain ! Anciennement connue sous le nom de xDAI, Gnosis Chain est une chaîne basée sur EVM. 
Gnosis Chain a été sélectionnée comme la première en raison de sa convivialité d'exécution des nœuds, de sa préparation à l'indexeur, de son alignement avec The Graph et de son adoption dans web3. - -### 3. Comment de nouvelles chaînes seront-elles ajoutées au programme MIPs ? - -De nouvelles chaînes seront annoncées tout au long du programme MIP, en fonction de l'état de préparation de l'indexeur, de la demande et du sentiment de la communauté. Les chaînes seront d'abord prises en charge sur le testnet et, par la suite, un GIP sera transmis pour prendre en charge cette chaîne sur le réseau principal. Les indexeurs participant au programme MIPs choisiront les chaînes qu'ils souhaitent soutenir et gagneront des récompenses par chaîne, en plus de gagner des frais de requête et des récompenses d'indexation sur le réseau pour la fourniture de subgraphs. Les participants aux MIP seront notés en fonction de leurs performances, de leur capacité à répondre aux besoins du réseau et du soutien de la communauté. - -### 4. Comment saurons-nous quand le réseau sera prêt pour une nouvelle chaîne ? - -La Graph Foundation surveillera les mesures de performances QoS, les performances du réseau et les canaux communautaires pour mieux évaluer l'état de préparation. La priorité est de garantir que le réseau répond aux besoins de performances de ces dapps multi-chaînes afin de pouvoir migrer leurs subgraphs. - -### La Graph Foundation surveillera les mesures de performances QoS, les performances du réseau et les canaux communautaires pour mieux évaluer l'état de préparation. La priorité est de garantir que le réseau répond aux besoins de performances de ces dapps multi-chaînes afin de pouvoir migrer leurs subgraphs? 
- -Étant donné que les chaînes varient dans leurs exigences en matière de synchronisation des nœuds, et qu'elles diffèrent en termes de volume de requêtes et d'adoption, les récompenses par chaîne seront décidées à la fin du cycle de cette chaîne pour garantir que tous les commentaires et apprentissags sont capturés. Cependant, à tout moment, les indexeurs pourront également gagner des frais de requête et des récompenses d'indexation une fois que la chaîne sera prise en charge sur le réseau. - -### 6. Devons-nous indexer toutes les chaînes du programme MIP ou pouvons-nous choisir une seule chaîne et l'indexer ? - -Vous êtes invités à indexer la chaîne de votre choix ! L'objectif du programme MIPs est de doter les indexeurs des outils et des connaissances nécessaires pour indexer les chaînes qu'ils souhaitent et prendre en charge les écosystèmes Web3 qui les intéressent. Cependant, pour chaque chaîne, il existe des phases allant du testnet au mainnet. Assurez-vous de terminer toutes les phases des chaînes que vous indexez. Voir [La page de notion MIPs](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) pour en savoir plus sur les phases. - -### 7. Quand les récompenses seront-elles distribuées ? - -Les récompenses MIP seront distribuées par chaîne une fois que les mesures de performances seront atteintes et que les subgraphs migrés seront pris en charge par ces indexeurs. Recherchez des informations sur les récompenses totales par chaîne à mi-chemin du cycle de cette chaîne. - -### 8. Comment fonctionne la notation ? - -Les indexeurs concourront pour des récompenses basées sur leurs scores tout au long du programme dans le classement. La notation du programme sera basée sur : - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? - -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. 
- -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. Comment les rôles Discord sont-ils assignés ? - -Les modérateurs assigneront les rôles dans les prochains jours. - -### 10. Est-il possible de démarrer le programme sur un réseau de test, puis de passer au réseau principal ? Serez-vous capable d'identifier mon nœud et d'en tenir compte lors de la distribution des récompenses ? - -Oui, c’est effectivement ce que l’on attend de vous. Plusieurs phases sont sur Görli et une sur le mainnet. - -### 11. À quel moment attendez-vous des participants qu'ils ajoutent un déploiement de réseau principal ? - -Il sera nécessaire d'avoir un indexeur de mainnet pendant la phase 3. Plus d'informations à ce sujet seront [partagées bientôt sur cette page de notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Les récompenses seront-elles soumises à des conditions d'acquisition ? - -Le pourcentage qui sera distribué à la fin du programme sera soumis à l'acquisition des droits. Plus d’informations à ce sujet seront partagées dans l’accord d’indexation. - -### 13. Pour les équipes comptant plus d'un membre, tous les membres de l'équipe se verront-ils attribuer un rôle Discord MIPs ? - -Oui - -### 14. Est-il possible d'utiliser les jetons verrouillés du programme de conservation de graphiques pour participer au testnet MIPs ? - -Oui - -### 15. Pendant le programme MIP, y aura-t-il une période pour contester les POI invalides ? - -Être décidé. 
Veuillez revenir périodiquement sur cette page pour plus de détails à ce sujet ou si votre demande est urgente, veuillez envoyer un e-mail à info@thegraph.foundation - -### 17. Peut-on combiner deux contrats d'acquisition progressive(vesting contracts) ? - -Non. Les options sont les suivantes : vous pouvez déléguer l’un à l’autre ou exécuter deux indexeurs distincts. - -### 18. Questions relatives au KYC ? - -Veuillez envoyer un e-mail à info@thegraph.foundation - -### 19. Je ne suis pas prêt à indexer la chaîne Gnosis, puis-je intervenir et commencer l'indexation à partir d'une autre chaîne lorsque je suis prêt ? - -Oui - -### 20. Existe-t-il des régions recommandées pour exécuter les serveurs ? - -Nous ne donnons pas de recommandations sur les régions. Lorsque vous choisissez des emplacements, vous voudrez peut-être réfléchir aux principaux marchés pour les crypto-monnaies. - -### 21. What is “handler gas cost”? - -C'est la mesure déterministe du coût d'exécution d'un gestionnaire. Contrairement à ce que son nom pourrait laisser penser, il n’est pas lié au coût du gaz sur les blockchains. 
diff --git a/website/pages/fr/querying/_meta.js b/website/pages/fr/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/fr/querying/_meta.js +++ b/website/pages/fr/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/fr/querying/graph-client/_meta.js b/website/pages/fr/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/fr/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ha/_meta.js b/website/pages/ha/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/ha/_meta.js +++ b/website/pages/ha/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/ha/cookbook/avoid-eth-calls.mdx b/website/pages/ha/cookbook/avoid-eth-calls.mdx new file mode 100644 index 000000000000..25fcb8b0db9d --- /dev/null +++ b/website/pages/ha/cookbook/avoid-eth-calls.mdx @@ -0,0 +1,116 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ha/cookbook/derivedfrom.mdx b/website/pages/ha/cookbook/derivedfrom.mdx new file mode 100644 index 000000000000..75827a185a6b --- /dev/null +++ b/website/pages/ha/cookbook/derivedfrom.mdx @@ -0,0 +1,87 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. + +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! 
+ content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/developing/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ha/cookbook/how-to-secure-api-keys-using-nextjs-server-components.mdx b/website/pages/ha/cookbook/how-to-secure-api-keys-using-nextjs-server-components.mdx new file mode 100644 index 000000000000..e37d83acbe78 --- /dev/null +++ b/website/pages/ha/cookbook/how-to-secure-api-keys-using-nextjs-server-components.mdx @@ -0,0 +1,123 @@ +--- +title: How to Secure API Keys Using Next.js Server Components +--- + +## Overview + +We can use [Next.js server components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) to properly secure our API key from exposure in the frontend of our dapp. To further increase our API key security, we can also [restrict our API key to certain subgraphs or domains in Subgraph Studio](/cookbook/upgrading-a-subgraph/#securing-your-api-key). + +In this cookbook, we will go over how to create a Next.js server component that queries a subgraph while also hiding the API key from the frontend. + +### Caveats + +- Next.js server components do not protect API keys from being drained using denial of service attacks. +- The Graph Network gateways have denial of service detection and mitigation strategies in place, however using server components may weaken these protections. +- Next.js server components introduce centralization risks as the server can go down. + +### Why It's Needed + +In a standard React application, API keys included in the frontend code can be exposed to the client-side, posing a security risk. While `.env` files are commonly used, they don't fully protect the keys since React's code is executed on the client side, exposing the API key in the headers. Next.js Server Components address this issue by handling sensitive operations server-side. 
+ +### Using client-side rendering to query a subgraph + +![Client-side rendering](/img/api-key-client-side-rendering.png) + +### Prerequisites + +- An API key from [Subgraph Studio](https://thegraph.com/studio) +- Basic knowledge of Next.js and React. +- An existing Next.js project that uses the [App Router](https://nextjs.org/docs/app). + +## Step-by-Step Cookbook + +### Step 1: Set Up Environment Variables + +1. In our Next.js project root, create a `.env.local` file. +2. Add our API key: `API_KEY=`. + +### Step 2: Create a Server Component + +1. In our `components` directory, create a new file, `ServerComponent.js`. +2. Use the provided example code to set up the server component. + +### Step 3: Implement Server-Side API Request + +In `ServerComponent.js`, add the following code: + +```javascript +const API_KEY = process.env.API_KEY + +export default async function ServerComponent() { + const response = await fetch( + `https://gateway-arbitrum.network.thegraph.com/api/${API_KEY}/subgraphs/id/HUZDsRpEVP2AvzDCyzDHtdc64dyDxx8FQjzsmqSg4H3B`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + query: /* GraphQL */ ` + { + factories(first: 5) { + id + poolCount + txCount + totalVolumeUSD + } + } + `, + }), + }, + ) + + const responseData = await response.json() + const data = responseData.data + + return ( +
+    <div>
+      <h3>Server Component</h3>
+      {data ? (
+        <ul>
+          {data.factories.map((factory) => (
+            <li key={factory.id}>
+              <p>ID: {factory.id}</p>
+              <p>Pool Count: {factory.poolCount}</p>
+              <p>Transaction Count: {factory.txCount}</p>
+              <p>Total Volume USD: {factory.totalVolumeUSD}</p>
+            </li>
+          ))}
+        </ul>
+      ) : (
+        <p>Loading data...</p>
+      )}
+    </div>
+ ) +} +``` + +### Step 4: Use the Server Component + +1. In our page file (e.g., `pages/index.js`), import `ServerComponent`. +2. Render the component: + +```javascript +import ServerComponent from './components/ServerComponent' + +export default function Home() { + return ( +
+ +
+ ) +} +``` + +### Step 5: Run and Test Our Dapp + +Start our Next.js application using `npm run dev`. Verify that the server component is fetching data without exposing the API key. + +![Server-side rendering](/img/api-key-server-side-rendering.png) + +### Conclusion + +By utilizing Next.js Server Components, we've effectively hidden the API key from the client-side, enhancing the security of our application. This method ensures that sensitive operations are handled server-side, away from potential client-side vulnerabilities. Finally, be sure to explore [other API key security measures](/cookbook/upgrading-a-subgraph/#securing-your-api-key) to increase your API key security even further. diff --git a/website/pages/ha/cookbook/immutable-entities-bytes-as-ids.mdx b/website/pages/ha/cookbook/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..725e53d1cf53 --- /dev/null +++ b/website/pages/ha/cookbook/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,190 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
+ +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. + +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. 
However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. + +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. 
+ +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. + +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ha/cookbook/pruning.mdx b/website/pages/ha/cookbook/pruning.mdx new file mode 100644 index 000000000000..d79d5b8911f9 --- /dev/null +++ b/website/pages/ha/cookbook/pruning.mdx @@ -0,0 +1,55 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. 
Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: `: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. 
+ +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ha/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ha/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index d1bf3919519d..000000000000 --- a/website/pages/ha/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> If a network is not supported on the Hosted Service, you can run your own [graph-node](https://github.com/graphprotocol/graph-node) to index it. - -This page explains how to deploy a subgraph to the Hosted Service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a Hosted Service account - -Before using the Hosted Service, create an account in our Hosted Service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [Hosted Service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). 
Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the Hosted Service - -Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in the Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Defining a Subgraph section](/developing/defining-a-subgraph). - -## Deploy a Subgraph on the Hosted Service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell the Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, the Graph Explorer will switch to showing the synchronization status of your subgraph. 
Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. 
- -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... 
- "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the Hosted Service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The Hosted Service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the Hosted Service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the Hosted Service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's Hosted Service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -When a new version of a subgraph is deployed, the previous version is archived (deleted from the graph-node DB). This only happens if the previous version is not published to The Graph's decentralized network. - -When a subgraph version isn’t queried for over 45 days, that version is archived. 
- -Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index d6f0f891c6cc..000000000000 --- a/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Deploying a Subgraph to the Subgraph Studio ---- - -> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). - -These are the steps to deploy your subgraph to the Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in the Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to the Subgraph Studio - -## Installing Graph CLI - -We are using the same CLI to deploy subgraphs to our [hosted service](https://thegraph.com/hosted-service/) and to the [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install graph-cli. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. 
- -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. 
diff --git a/website/pages/ha/deploying/hosted-service.mdx b/website/pages/ha/deploying/hosted-service.mdx deleted file mode 100644 index 3b65cfbccdf0..000000000000 --- a/website/pages/ha/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Create a Subgraph - -First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. 
This will attempt to fetch the contract ABI from [Etherscan](https://etherscan.io/). - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from Etherscan, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. 
- -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ha/deploying/subgraph-studio.mdx b/website/pages/ha/deploying/subgraph-studio.mdx deleted file mode 100644 index 5925b2a1dfb9..000000000000 --- a/website/pages/ha/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use the Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -The Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in the Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in the Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask or WalletConnect -1. 
Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -The Graph Network is not yet able to support all of the data-sources & features available on the Hosted Service. In order to be supported by Indexers on the network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. - -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (pst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! 
- -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [blog](https://thegraph.com/blog/building-with-subgraph-studio). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either mainnet or Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Sepolia, which is free to do. This will allow you to see how the subgraph will work in The Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to The Graph Explorer. 
This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in The Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in the Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. 
- -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/ha/developing/creating-a-subgraph.mdx b/website/pages/ha/developing/creating-a-subgraph.mdx deleted file mode 100644 index f1c00d1b6646..000000000000 --- a/website/pages/ha/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1236 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. - -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. 
- -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. - -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. 
- -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. 
- -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -The triggers for a data source within a block are ordered using the following process: - -1. 
Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. 
- -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. 
If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. 
Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. - -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! 
- transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. 
- -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using double quotations `""`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - "unique identifier and primary key of the entity" - id: Bytes! - address: Bytes! 
-} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. 
For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
- -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. 
- -- `event.params.id.toHex()` -- `event.transaction.from.toHex()` -- `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` - -We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. - -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. 
All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. 
- -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. 
Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. Load the transaction details page where you'll find the start block for that contract. - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. 
Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. 
In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. - -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. - -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. 
- -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| --------------------------------------------------------- | --------------------------------------------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### IPFS on Ethereum Contracts - -A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts. - -Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing. - -> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio. 
- -> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose. - -When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality. - -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a GraphQL error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. 
Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. 
- -This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. 
- -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/graph-ts/api/#json-api)). - -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the 
auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. 
It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. - -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. 
- -You can use [DataSource context](https://thegraph.com/docs/en/developing/graph-ts/api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. 
- -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ha/developing/creating-a-subgraph/_meta.js b/website/pages/ha/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ha/developing/creating-a-subgraph/advanced.mdx b/website/pages/ha/developing/creating-a-subgraph/advanced.mdx new file mode 100644 index 000000000000..45acd610f237 --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/advanced.mdx @@ -0,0 +1,555 @@ +--- +title: Advanced Subgraph Features +--- + +## Overview + +Add and implement advanced subgraph features to enhance your subgraph's build. + +Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: + +| Feature | Name | +| ---------------------------------------------------- | ---------------- | +| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | +| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | + +For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - fullTextSearch + - nonFatalErrors +dataSources: ... 
+``` + +> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. + +## Timeseries and Aggregations + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. + +### Example Schema + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} + +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +### Defining Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. + +Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. + +#### Available Aggregation Intervals + +- `hour`: sets the timeseries period every hour, on the hour. +- `day`: sets the timeseries period every day, starting and ending at 00:00. 
+ +#### Available Aggregation Functions + +- `sum`: Total of all values. +- `count`: Number of values. +- `min`: Minimum value. +- `max`: Maximum value. +- `first`: First value in the period. +- `last`: Last value in the period. + +#### Example Aggregations Query + +```graphql +{ + stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { + id + timestamp + sum + } +} +``` + +Note: + +To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. + +[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. + +## Non-fatal errors + +Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. + +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. + +Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - nonFatalErrors + ... +``` + +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: + +```graphql +foos(first: 100, subgraphError: allow) { + id +} + +_meta { + hasIndexingErrors +} +``` + +If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: + +```graphql +"data": { + "foos": [ + { + "id": "0xdead" + } + ], + "_meta": { + "hasIndexingErrors": true + } +}, +"errors": [ + { + "message": "indexing_error" + } +] +``` + +## IPFS/Arweave File Data Sources + +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. + +> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. + +### Overview + +Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. + +This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. + +> This replaces the existing `ipfs.cat` API + +### Upgrade guide + +#### Update `graph-ts` and `graph-cli` + +File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 + +#### Add a new entity type which will be updated when files are found + +File data sources cannot access or update chain-based entities, but must update file specific entities. + +This may mean splitting out fields from existing entities into separate entities, linked together. + +Original combined entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! 
+ externalURL: String! + ipfsURI: String! + image: String! + name: String! + description: String! + type: String! + updatedAtTimestamp: BigInt + owner: User! +} +``` + +New, split entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + ipfsURI: TokenMetadata + updatedAtTimestamp: BigInt + owner: String! +} + +type TokenMetadata @entity { + id: ID! + image: String! + externalURL: String! + name: String! + description: String! +} +``` + +If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! + +> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. + +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` + +This is the data source which will be spawned when a file of interest is identified. + +```yaml +templates: + - name: TokenMetadata + kind: file/ipfs + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + file: ./src/mapping.ts + handler: handleMetadata + entities: + - TokenMetadata + abis: + - name: Token + file: ./abis/Token.json +``` + +> Currently `abis` are required, though it is not possible to call contracts from within file data sources + +The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. + +#### Create a new handler to process files + +This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
+ +The CID of the file as a readable string can be accessed via the `dataSource` as follows: + +```typescript +const cid = dataSource.stringParam() +``` + +Example handler: + +```typescript +import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' +import { TokenMetadata } from '../generated/schema' + +export function handleMetadata(content: Bytes): void { + let tokenMetadata = new TokenMetadata(dataSource.stringParam()) + const value = json.fromBytes(content).toObject() + if (value) { + const image = value.get('image') + const name = value.get('name') + const description = value.get('description') + const externalURL = value.get('external_url') + + if (name && image && description && externalURL) { + tokenMetadata.name = name.toString() + tokenMetadata.image = image.toString() + tokenMetadata.externalURL = externalURL.toString() + tokenMetadata.description = description.toString() + } + + tokenMetadata.save() + } +} +``` + +#### Spawn file data sources when required + +You can now create file data sources during execution of chain-based handlers: + +- Import the template from the auto-generated `templates` +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). + +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). + +Example: + +```typescript +import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' + +const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' +//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. + +export function handleTransfer(event: TransferEvent): void { + let token = Token.load(event.params.tokenId.toString()) + if (!token) { + token = new Token(event.params.tokenId.toString()) + token.tokenID = event.params.tokenId + + token.tokenURI = '/' + event.params.tokenId.toString() + '.json' + const tokenIpfsHash = ipfshash + token.tokenURI + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + + token.ipfsURI = tokenIpfsHash + + TokenMetadataTemplate.create(tokenIpfsHash) + } + + token.updatedAtTimestamp = event.block.timestamp + token.owner = event.params.to.toHexString() + token.save() +} +``` + +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. + +This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. + +> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file + +Congratulations, you are using file data sources! + +#### Deploying your subgraphs + +You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
+ +#### Limitations + +File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: + +- Entities created by File Data Sources are immutable, and cannot be updated +- File Data Source handlers cannot access entities from other file data sources +- Entities associated with File Data Sources cannot be accessed by chain-based handlers + +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! + +Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. + +#### Best practices + +If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. + +You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. + +If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. + +> We are working to improve the above recommendation, so queries only return the "most recent" version + +#### Known issues + +File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
+ +Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. + +#### Examples + +[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) + +#### References + +[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) + +## Indexed Argument Filters / Topic Filters + +> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` + +Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. + +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. + +- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. + +### How Topic Filters Work + +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. + +- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
+ +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract Token { + // Event declaration with indexed parameters for addresses + event Transfer(address indexed from, address indexed to, uint256 value); + + // Function to simulate transferring tokens + function transfer(address to, uint256 value) public { + // Emitting the Transfer event with from, to, and value + emit Transfer(msg.sender, to, value); + } +} +``` + +In this example: + +- The `Transfer` event is used to log transactions of tokens between addresses. +- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. +- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. + +#### Configuration in Subgraphs + +Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: + +```yaml +eventHandlers: + - event: SomeEvent(indexed uint256, indexed address, indexed uint256) + handler: handleSomeEvent + topic1: ['0xValue1', '0xValue2'] + topic2: ['0xAddress1', '0xAddress2'] + topic3: ['0xValue3'] +``` + +In this setup: + +- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. +- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. + +#### Filter Logic + +- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. +- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
+ +#### Example 1: Tracking Direct Transfers from Address A to Address B + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleDirectedTransfer + topic1: ['0xAddressA'] # Sender Address + topic2: ['0xAddressB'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. +- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. + +#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransferToOrFrom + topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address + topic2: ['0xAddressB', '0xAddressC'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. +- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. + +## Declared eth_call + +> Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. + +Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. + +This feature does the following: + +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. 
+- Allows faster data fetching, resulting in quicker query responses and a better user experience. +- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. + +### Key Concepts + +- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. +- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. +- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). + +#### Scenario without Declarative `eth_calls` + +Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. + +Traditionally, these calls might be made sequentially: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Total time taken = 3 + 2 + 4 = 9 seconds + +#### Scenario with Declarative `eth_calls` + +With this feature, you can declare these calls to be executed in parallel: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. + +Total time taken = max (3, 2, 4) = 4 seconds + +#### How it Works + +1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. 
+ +#### Example Configuration in Subgraph Manifest + +Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. + +`Subgraph.yaml` using `event.address`: + +```yaml +eventHandlers: +event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) +handler: handleSwap +calls: + global0X128: Pool[event.address].feeGrowthGlobal0X128() + global1X128: Pool[event.address].feeGrowthGlobal1X128() +``` + +Details for the example above: + +- `global0X128` is the declared `eth_call`. +- The text (`global0X128`) is the label for this `eth_call` which is used when logging errors. +- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` +- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. + +`Subgraph.yaml` using `event.params` + +```yaml +calls: + - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() +``` + +### Grafting onto Existing Subgraphs + +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + +When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. + +A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: + +```yaml +description: ... +graft: + base: Qm... 
# Subgraph ID of base subgraph + block: 7345624 # Block number +``` + +When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. + +Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. + +The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented + +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. 
diff --git a/website/pages/ha/developing/creating-a-subgraph/assemblyscript-mappings.mdx b/website/pages/ha/developing/creating-a-subgraph/assemblyscript-mappings.mdx new file mode 100644 index 000000000000..2ac894695fe1 --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/assemblyscript-mappings.mdx @@ -0,0 +1,113 @@ +--- +title: Writing AssemblyScript Mappings +--- + +## Overview + +The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. + +## Writing Mappings + +For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
+ +In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: + +```javascript +import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' +import { Gravatar } from '../generated/schema' + +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let id = event.params.id + let gravatar = Gravatar.load(id) + if (gravatar == null) { + gravatar = new Gravatar(id) + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. + +The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. + +### Recommended IDs for Creating New Entities + +It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. + +- `transfer.id = event.transaction.hash` + +- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` + +- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. 
Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like + +```typescript +let dayID = event.block.timestamp.toI32() / 86400 +let id = Bytes.fromI32(dayID) +``` + +- Convert constant addresses to `Bytes`. + +`const id = Bytes.fromHexString('0xdead...beef')` + +There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. + +### Handling of entities with identical IDs + +When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. + +If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. + +If no value is set for a field in the new entity with the same ID, the field will result in null as well. + +## Code Generation + +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. + +This is done with + +```sh +graph codegen [--output-dir ] [] +``` + +but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: + +```sh +# Yarn +yarn codegen + +# NPM +npm run codegen +``` + +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. + +```javascript +import { + // The contract class: + Gravity, + // The events classes: + NewGravatar, + UpdatedGravatar, +} from '../generated/Gravity/Gravity' +``` + +In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with + +```javascript +import { Gravatar } from '../generated/schema' +``` + +> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. + +Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. diff --git a/website/pages/ha/developing/creating-a-subgraph/install-the-cli.mdx b/website/pages/ha/developing/creating-a-subgraph/install-the-cli.mdx new file mode 100644 index 000000000000..282c68973a8a --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/install-the-cli.mdx @@ -0,0 +1,119 @@ +--- +title: Install the Graph CLI +--- + +> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). 
It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/network/curating/). + +## Overview + +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/creating-a-subgraph/subgraph-manifest/) and compiles the [mappings](/creating-a-subgraph/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. + +## Getting Started + +### Install the Graph CLI + +The Graph CLI is written in TypeScript, and you must have `node` and either `npm` or `yarn` installed to use it. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. + +On your local machine, run one of the following commands: + +#### Using [npm](https://www.npmjs.com/) + +```bash +npm install -g @graphprotocol/graph-cli@latest +``` + +#### Using [yarn](https://yarnpkg.com/) + +```bash +yarn global add @graphprotocol/graph-cli +``` + +The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. + +## Create a Subgraph + +### From an Existing Contract + +The following command creates a subgraph that indexes all events of an existing contract: + +```sh +graph init \ + --product subgraph-studio + --from-contract \ + [--network ] \ + [--abi ] \ + [] +``` + +- The command tries to retrieve the contract ABI from Etherscan. + + - The Graph CLI relies on a public RPC endpoint. While occasional failures are expected, retries typically resolve this issue. 
If failures persist, consider using a local ABI. + +- If any of the optional arguments are missing, it guides you through an interactive form. + +- The `<SUBGRAPH_SLUG>` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. + +### From an Example Subgraph + +The following command initializes a new project from an example subgraph: + +```sh +graph init --from-example=example-subgraph +``` + +- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. + +- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. + +### Add New `dataSources` to an Existing Subgraph + +`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. + +Recent versions of the Graph CLI support adding new `dataSources` to an existing subgraph through the `graph add` command: + +```sh +graph add <address>
[<subgraph-manifest default: "./subgraph.yaml">] + +Options: + + --abi <path> Path to the contract ABI (default: download from Etherscan) + --contract-name Name of the contract (default: Contract) + --merge-entities Whether to merge entities with the same name (default: false) + --network-file <path> Networks config file path (default: "./networks.json") +``` + +#### Specifics + +The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option) and creates a new `dataSource`, similar to how the `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. This allows you to index implementation contracts from their proxy contracts. + +- The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: + + - If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. + + - If `false`: a new `entity` & `event` handler should be created with `${dataSourceName}{EventName}`. + +- The contract `address` will be written to the `networks.json` for the relevant network. + +> Note: When using the interactive CLI, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. + +### Getting The ABIs + +The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: + +- If you are building your own project, you will likely have access to your most current ABIs. +- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. +- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. 
+ +## SpecVersion Releases + +| Version | Release notes | +| :-: | --- | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | diff --git a/website/pages/ha/developing/creating-a-subgraph/ql-schema.mdx b/website/pages/ha/developing/creating-a-subgraph/ql-schema.mdx new file mode 100644 index 000000000000..90036d1bfab9 --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/ql-schema.mdx @@ -0,0 +1,312 @@ +--- +title: The Graph QL Schema +--- + +## Overview + +The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. + +> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api/) section. + +### Defining Entities + +Before defining entities, it is important to take a step back and think about how your data is structured and linked. + +- All queries will be made against the data model defined in the subgraph schema. 
As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- It may be useful to imagine entities as "objects containing data", rather than as events or functions. +- You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. +- Each type that should be an entity is required to be annotated with an `@entity` directive. +- By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. + - Mutability comes at a price, so for entity types that will never be modified, such as those containing data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. + - If changes happen in the same block in which the entity was created, then mappings can make changes to immutable entities. Immutable entities are much faster to write and to query so they should be used whenever possible. + +#### Good Example + +The following `Gravatar` entity is structured around a Gravatar object and is a good example of how an entity could be defined. + +```graphql +type Gravatar @entity(immutable: true) { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String + accepted: Boolean +} +``` + +#### Bad Example + +The following example `GravatarAccepted` and `GravatarDeclined` entities are based around events. It is not recommended to map events or function calls to entities 1:1. + +```graphql +type GravatarAccepted @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} + +type GravatarDeclined @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} +``` + +#### Optional and Required Fields + +Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. 
If the field is a scalar field, you get an error when you try to store the entity. If the field references another entity then you get this error: + +``` +Null value resolved for non-null field 'name' +``` + +Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query than those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. + +For some entity types the `id` for `Bytes!` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id) ` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. + +### Built-In Scalar Types + +#### GraphQL Supported Scalars + +The following scalars are supported in the GraphQL API: + +| Type | Description | +| --- | --- | +| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | +| `Boolean` | Scalar for `boolean` values. | +| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Large integers. 
Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | + +### Enums + +You can also create enums within a schema. Enums have the following syntax: + +```graphql +enum TokenStatus { + OriginalOwner + SecondOwner + ThirdOwner +} +``` + +Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: + +More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). + +### Entity Relationships + +An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. + +Relationships are defined on entities just like any other field except that the type specified is that of another entity. + +#### One-To-One Relationships + +Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: + +```graphql +type Transaction @entity(immutable: true) { + id: Bytes! + transactionReceipt: TransactionReceipt +} + +type TransactionReceipt @entity(immutable: true) { + id: Bytes! 
+ transaction: Transaction +} +``` + +#### One-To-Many Relationships + +Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +### Reverse Lookups + +Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. + +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. + +#### Example + +We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! + tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +#### Many-To-Many Relationships + +For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. 
+ +#### Example + +Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [User!]! +} + +type User @entity { + id: Bytes! + name: String! + organizations: [Organization!]! @derivedFrom(field: "members") +} +``` + +A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [UserOrganization!]! @derivedFrom(field: "organization") +} + +type User @entity { + id: Bytes! + name: String! + organizations: [UserOrganization!] @derivedFrom(field: "user") +} + +type UserOrganization @entity { + id: Bytes! # Set to `user.id.concat(organization.id)` + user: User! + organization: Organization! +} +``` + +This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: + +```graphql +query usersWithOrganizations { + users { + organizations { + # this is a UserOrganization entity + organization { + name + } + } + } +} +``` + +This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. + +### Adding comments to the schema + +As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: + +```graphql +type MyFirstEntity @entity { + # unique identifier and primary key of the entity + id: Bytes! + address: Bytes! 
+} +``` + +## Defining Fulltext Search Fields + +Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. + +A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. + +To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. + +```graphql +type _Schema_ + @fulltext( + name: "bandSearch" + language: en + algorithm: rank + include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + ) + +type Band @entity { + id: Bytes! + name: String! + description: String! + bio: String + wallet: Address + labels: [Label!]! + discography: [Album!]! + members: [Musician!]! +} +``` + +The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. + +```graphql +query { + bandSearch(text: "breaks & electro & detroit") { + id + name + description + wallet + } +} +``` + +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. + +## Languages supported + +Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. 
For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". + +Supported language dictionaries: + +| Code | Dictionary | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portuguese | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | + +### Ranking Algorithms + +Supported algorithms for ordering results: + +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | Use the match quality (0-1) of the fulltext query to order the results. | +| proximityRank | Similar to rank but also includes the proximity of the matches. | diff --git a/website/pages/ha/developing/creating-a-subgraph/starting-your-subgraph.mdx b/website/pages/ha/developing/creating-a-subgraph/starting-your-subgraph.mdx new file mode 100644 index 000000000000..5127f01632aa --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/starting-your-subgraph.mdx @@ -0,0 +1,21 @@ +--- +title: Starting Your Subgraph +--- + +## Overview + +The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. + +When you create a [subgraph](/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. + +Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. + +### Start Building + +Start the process and build a subgraph that matches your needs: + +1. [Install the CLI](/developing/creating-a-subgraph/install-the-cli/) - Set up your infrastructure +2. 
[Subgraph Manifest](/developing/creating-a-subgraph/subgraph-manifest/) - Understand a subgraph's key component +3. [The Graph QL Schema](/developing/creating-a-subgraph/ql-schema/) - Write your schema +4. [Writing AssemblyScript Mappings](/developing/creating-a-subgraph/assemblyscript-mappings/) - Write your mappings +5. [Advanced Features](/developing/creating-a-subgraph/advanced/) - Customize your subgraph with advanced features diff --git a/website/pages/ha/developing/creating-a-subgraph/subgraph-manifest.mdx b/website/pages/ha/developing/creating-a-subgraph/subgraph-manifest.mdx new file mode 100644 index 000000000000..7476b7175d57 --- /dev/null +++ b/website/pages/ha/developing/creating-a-subgraph/subgraph-manifest.mdx @@ -0,0 +1,534 @@ +--- +title: Subgraph Manifest +--- + +## Overview + +The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. + +The **subgraph definition** consists of the following files: + +- `subgraph.yaml`: Contains the subgraph manifest + +- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL + +- `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) + +### Subgraph Capabilities + +A single subgraph can: + +- Index data from multiple smart contracts (but not multiple networks). + +- Index data from IPFS files using File Data Sources. + +- Add an entry for each contract that requires indexing to the `dataSources` array. + +The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
+ +For the example subgraph listed above, `subgraph.yaml` is: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + abi: Gravity + startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + - event: UpdatedGravatar(uint256,address,string,string) + handler: handleUpdatedGravatar + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCall + filter: + kind: call + file: ./src/mapping.ts +``` + +## Subgraph Entries + +> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/developing/creating-a-subgraph/ql-schema/). + +The important entries to update for the manifest are: + +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. + +- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. + +- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. + +- `features`: a list of all used [feature](#experimental-features) names. 
+ +- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. + +- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. + +- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. + +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + +- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. + +- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. + +- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. + +- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+ +- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. + +A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. + +## Event Handlers + +Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. + +### Defining an Event Handler + +An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: dev + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: Approval(address,address,uint256) + handler: handleApproval + - event: Transfer(address,address,uint256) + handler: handleTransfer + topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. +``` + +## Call Handlers + +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. 
In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. + +Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. + +> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network. + +### Defining a Call Handler + +To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar +``` + +The `function` is the normalized function signature to filter calls by. 
The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. + +### Mapping Function + +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: + +```typescript +import { CreateGravatarCall } from '../generated/Gravity/Gravity' +import { Transaction } from '../generated/schema' + +export function handleCreateGravatar(call: CreateGravatarCall): void { + let id = call.transaction.hash + let transaction = new Transaction(id) + transaction.displayName = call.inputs._displayName + transaction.imageUrl = call.inputs._imageUrl + transaction.save() +} +``` + +The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. + +## Block Handlers + +In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. + +### Supported Filters + +#### Call Filter + +```yaml +filter: + kind: call +``` + +_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ + +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
+ +The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: dev + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCallToContract + filter: + kind: call +``` + +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Mapping Function + +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
+ +```typescript +import { ethereum } from '@graphprotocol/graph-ts' + +export function handleBlock(block: ethereum.Block): void { + let id = block.hash + let entity = new Block(id) + entity.save() +} +``` + +## Anonymous Events + +If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: + +```yaml +eventHandlers: + - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) + topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' + handler: handleGive +``` + +An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. + +## Transaction Receipts in Event Handlers + +Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. + +To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. + +```yaml +eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + receipt: true +``` + +Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. + +## Order of Triggering Handlers + +The triggers for a data source within a block are ordered using the following process: + +1. Event and call triggers are first ordered by transaction index within the block. +2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. +3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. + +These ordering rules are subject to change. 
+ +> **Note:** When new [dynamic data sources](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. + +## Data Source Templates + +A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. + +The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. + +### Data Source for the Main Contract + +First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. + +```yaml +dataSources: + - kind: ethereum/contract + name: Factory + network: mainnet + source: + address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + abi: Factory + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/factory.ts + entities: + - Directory + abis: + - name: Factory + file: ./abis/factory.json + eventHandlers: + - event: NewExchange(address,address) + handler: handleNewExchange +``` + +### Data Source Templates for Dynamically Created Contracts + +Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. 
Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. + +```yaml +dataSources: + - kind: ethereum/contract + name: Factory + # ... other source fields for the main contract ... +templates: + - name: Exchange + kind: ethereum/contract + network: mainnet + source: + abi: Exchange + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/exchange.ts + entities: + - Exchange + abis: + - name: Exchange + file: ./abis/exchange.json + eventHandlers: + - event: TokenPurchase(address,uint256,uint256) + handler: handleTokenPurchase + - event: EthPurchase(address,uint256,uint256) + handler: handleEthPurchase + - event: AddLiquidity(address,uint256,uint256) + handler: handleAddLiquidity + - event: RemoveLiquidity(address,uint256,uint256) + handler: handleRemoveLiquidity +``` + +### Instantiating a Data Source Template + +In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + // Start indexing the exchange; `event.params.exchange` is the + // address of the new exchange contract + Exchange.create(event.params.exchange) +} +``` + +> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> +> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. 
+ +### Data Source Context + +Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + let context = new DataSourceContext() + context.setString('tradingPair', event.params.tradingPair) + Exchange.createWithContext(event.params.exchange, context) +} +``` + +Inside a mapping of the `Exchange` template, the context can then be accessed: + +```typescript +import { dataSource } from '@graphprotocol/graph-ts' + +let context = dataSource.context() +let tradingPair = context.getString('tradingPair') +``` + +There are setters and getters like `setString` and `getString` for all value types. + +## Start Blocks + +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. + +```yaml +dataSources: + - kind: ethereum/contract + name: ExampleSource + network: mainnet + source: + address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + abi: ExampleContract + startBlock: 6627917 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/factory.ts + entities: + - User + abis: + - name: ExampleContract + file: ./abis/ExampleContract.json + eventHandlers: + - event: NewEvent(address,address) + handler: handleNewEvent +``` + +> **Note:** The contract creation block can be quickly looked up on Etherscan: +> +> 1. 
Search for the contract by entering its address in the search bar. +> 2. Click on the creation transaction hash in the `Contract Creator` section. +> 3. Load the transaction details page where you'll find the start block for that contract. + +## Indexer Hints + +The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. + +> This feature is available from `specVersion: 1.0.0` + +### Prune + +`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: + +1. `"never"`: No pruning of historical data; retains the entire history. +2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. +3. A specific number: Sets a custom limit on the number of historical blocks to retain. + +``` + indexerHints: + prune: auto +``` + +> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. + +History as of a given block is required for: + +- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history +- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block +- Rewinding the subgraph back to that block + +If historical data as of the block has been pruned, the above capabilities will not be available. + +> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
+ +For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: + +To retain a specific amount of historical data: + +``` + indexerHints: + prune: 1000 # Replace 1000 with the desired number of blocks to retain +``` + +To preserve the complete history of entity states: + +``` +indexerHints: + prune: never +``` diff --git a/website/pages/ha/developing/graph-ts/_meta.js b/website/pages/ha/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ha/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ha/managing/deprecate-a-subgraph.mdx b/website/pages/ha/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ha/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. 
-- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ha/mips-faqs.mdx b/website/pages/ha/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/ha/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/ha/network/contracts.mdx b/website/pages/ha/network/contracts.mdx new file mode 100644 index 000000000000..6abd80577ced --- /dev/null +++ b/website/pages/ha/network/contracts.mdx @@ -0,0 +1,29 @@ +--- +title: Protocol Contracts +--- + +import { ProtocolContractsTable } from '@/src/contracts' + +Below are the deployed contracts which power The Graph Network. Visit the official [contracts repository](https://github.com/graphprotocol/contracts) to learn more. + +## Arbitrum + +This is the principal deployment of The Graph Network. + + + +## Mainnet + +This was the original deployment of The Graph Network. [Learn more](/arbitrum/arbitrum-faq) about The Graph's scaling with Arbitrum. + + + +## Arbitrum Sepolia + +This is the primary testnet for The Graph Network. Testnet is predominantly used by core developers and ecosystem participants for testing purposes. There are no guarantees of service or availability on The Graph's testnets. 
+ + + +## Sepolia + + diff --git a/website/pages/ha/querying/_meta.js b/website/pages/ha/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ha/querying/_meta.js +++ b/website/pages/ha/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ha/querying/graph-client/_meta.js b/website/pages/ha/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ha/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/hi/_meta.js b/website/pages/hi/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/hi/_meta.js +++ b/website/pages/hi/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/hi/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/hi/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index b8f79345398e..000000000000 --- a/website/pages/hi/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: होस्ट की गई सेवा में एक सबग्राफ तैनात करना ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. 
You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## एक्सेस टोकन स्टोर करें - -खाता बनाने के बाद, अपने [डैशबोर्ड](https://thegraph.com/hosted-service/dashboard) पर नेविगेट करें। डैशबोर्ड पर प्रदर्शित एक्सेस टोकन को कॉपी करें और `ग्राफ़ ऑथ --प्रोडक्ट होस्टेड-सर्विस ` चलाएँ। यह आपके कंप्यूटर पर एक्सेस टोकन स्टोर करेगा। आपको केवल एक बार ऐसा करने की आवश्यकता है, या यदि आप कभी भी एक्सेस टोकन को पुन: उत्पन्न करते हैं। - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - प्रीव्यू इमेज और सबग्राफ के लिए थंबनेल के तौर पर इस्तेमाल करने के लिए इमेज चुनें। - -**Subgraph Name** - उस अकाउंट के नाम के साथ जिसके तहत सबग्राफ बनाया गया है, यह `account-name/subgraph-name`-शैली को भी परिभाषित करेगा वह नाम जो तैनाती और ग्राफ़िकल एंडपॉइंट के लिए उपयोग होगा। _इस फील्ड को बाद में बदला नहीं जा सकता।_ - -**Account** - वह खाता जिसके अंतर्गत सबग्राफ बनाया गया है। यह किसी व्यक्ति या संस्था का खाता हो सकता है। _सबग्राफ को बाद में खातों के बीच नहीं ले जाया जा सकता।_ - -**Subtitle** - टेक्स्ट जो सबग्राफ कार्ड में दिखाई देगा। - -**Description** - सबग्राफ का विवरण, सबग्राफ विवरण पृष्ठ पर दिखाई देता है - -**GitHub URL** - GitHub पर सबग्राफ रिपॉजिटरी का लिंक है। - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). 
- -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -आप `yarn deploy` चलाकर सबग्राफ तैनात करते हैं - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -वह सबग्राफ स्थिति पर स्विच करता है `Synced` एक बार ग्राफ़ नोड ने ऐतिहासिक ब्लॉकों से सभी डेटा निकाल लिए। ग्राफ़ नोड आपके सबग्राफ के लिए ब्लॉकों का निरीक्षण करना जारी रखेगा क्योंकि इन ब्लॉकों का खनन किया जाता है। - -## एक सबग्राफ को फिर से तैनात करना - -अपने सबग्राफ की परिभाषा में परिवर्तन करते समय, उदाहरण के लिए, इकाई मैपिंग में किसी समस्या को ठीक करने के लिए, अपने सबग्राफ के अपडेट किए गए संस्करण को तैनात करने के लिए `yarn deploy` कमांड को फिर से चलाएँ। सबग्राफ के किसी भी अपडेट के लिए आवश्यक है कि ग्राफ़ नोड आपके पूरे सबग्राफ को फिर से अनुक्रमित करे, फिर से उत्पत्ति ब्लॉक से शुरू हो। - -यदि आपका पहले से तैनात सबग्राफ अभी भी `समन्वयित` स्थिति में है, तो इसे तुरंत पुन: तैनात किए गए नए संस्करण से बदल दिया जाएगा। यदि पहले से तैनात सबग्राफ पहले से ही पूरी तरह से सिंक हो गया है, तो ग्राफ़ नोड नए तैनात संस्करण को `पड़ा हुआ संस्करण` के रूप में चिह्नित करेगा, इसे पृष्ठभूमि में सिंक करेगा, और केवल एक बार सिंक करने के बाद वर्तमान में तैनात संस्करण को नए से बदल देगा जो समाप्त हो गया है। यह सुनिश्चित करता है कि आपके पास काम करने के लिए एक सबग्राफ है जबकि नया संस्करण सिंक हो रहा है। - -## सबग्राफ को कई नेटवर्क पर तैनात करना - -कुछ मामलों में, आप एक ही सबग्राफ को इसके सभी कोड को डुप्लिकेट किए बिना कई नेटवर्क पर तैनात करना चाहेंगे। इसके साथ आने वाली मुख्य चुनौती यह है कि इन नेटवर्कों पर अनुबंध के पते अलग-अलग हैं। - -### ग्राफ-सी एल आई का उपयोग करना - -दोनों `graph build` (`v0.29.0` से) और `graph deploy` 
(`v0.32.0` से) दो नए विकल्प स्वीकार करते हैं: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -आप किसी `json` मानक फ़ाइल(डिफ़ॉल्ट रूप से `networks.json`) से नेटवर्क कॉन्फ़िगरेशन निर्दिष्ट करने के लिए `--network` विकल्प का उपयोग कर सकते हैं ताकि आप आसानी से अपना सबग्राफ अपडेट कर सकें विकास के दौरान। - -**ध्यान दें:** `init` कमांड अब प्रदान की गई जानकारी के आधार पर एक `networks.json` स्वतः उत्पन्न करेगा। तब आप मौजूदा को अपडेट करने या अतिरिक्त नेटवर्क जोड़ने में सक्षम होंगे - -यदि आपके पास `networks.json` फ़ाइल नहीं है, तो आपको निम्न संरचना के साथ मैन्युअल रूप से एक बनाने की आवश्यकता होगी: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**ध्यान दें:** कॉन्फ़िग फ़ाइल में आपको कोई `टेम्प्लेट` (यदि आपके पास है) निर्दिष्ट करने की आवश्यकता नहीं है, केवल `डेटा सोर्स`। यदि `subgraph.yaml` फ़ाइल में कोई `टेम्प्लेट` घोषित है, तो उनका नेटवर्क स्वचालित रूप से `--network` विकल्प के साथ निर्दिष्ट एक में अपडेट हो जाएगा. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -आपकी नेटवर्क कॉन्फ़िग फ़ाइल इस तरह दिखनी चाहिए: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." 
- } - } -} -``` - -अब हम निम्न में से कोई एक कमांड चला सकते हैं: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -अब आप `yarn deploy` करने के लिए तैयार हैं| - -**ध्यान दें:** जैसा कि पहले उल्लेख किया गया है, `graph-cli 0.32.0` के बाद से आप सीधे `yarn deploy` चला सकते हैं `--नेटवर्क` विकल्प के साथ: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Subgraph.yaml टेम्पलेट का उपयोग करना - -पुराने graph-cli संस्करणों के लिए एक समाधान जो कॉन्ट्रैक्ट एड्रेस जैसे पहलुओं को पैरामीटर करने की अनुमति देता है, [Mustache](https://mustache.github.io/) और [हैंडलबार](https://handlebarsjs.com/) जैसी टेम्प्लेटिंग प्रणाली का उपयोग करके इसके कुछ हिस्सों को उत्पन्न करना है| - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -और - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -इसके साथ ही, आप मेनिफ़ेस्ट में नेटवर्क नाम और एड्रेसों को वेरिएबल प्लेसहोल्डर्स `{{network}}` और `{{address}}` से बदल देंगे और मेनिफ़ेस्ट का नाम बदल कर रखेंगे जैसे की उदा. `subgraph.template.yaml`: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -किसी भी नेटवर्क के लिए एक मेनिफेस्ट उत्पन्न करने के लिए, आप `package.json` में `मूंछ` पर निर्भरता के साथ दो अतिरिक्त आदेश जोड़ सकते हैं: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -इसका एक कार्यशील उदाहरण [यहां](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759) पाया जा सकता है। - -**ध्यान दें:** यह दृष्टिकोण अधिक जटिल स्थितियों पर भी लागू किया जा सकता है, जहां कॉन्ट्रैक्ट एड्रेसों और नेटवर्क नामों से अधिक स्थानापन्न करना आवश्यक है या जहां टेम्प्लेट से मैपिंग या ABI भी उत्पन्न करना आवश्यक है. - -## सबग्राफ स्वास्थ्य की जाँच करना - -यदि एक सबग्राफ सफलतापूर्वक सिंक हो जाता है, तो यह एक अच्छा संकेत है कि यह हमेशा के लिए अच्छी तरह से चलता रहेगा। हालांकि, नेटवर्क पर नए ट्रिगर्स के कारण आपका सबग्राफ एक अनुपयोगी त्रुटि स्थिति में आ सकता है या यह प्रदर्शन समस्याओं या नोड ऑपरेटरों के साथ समस्याओं के कारण पीछे पड़ना शुरू हो सकता है। - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -यह आपको `chainHeadBlock` देगा जिसकी तुलना आप अपने सबग्राफ पर `latestBlock` से कर सकते हैं ताकि यह जांचा जा सके कि यह पीछे चल रहा है या नहीं। `सिंक किया गया` सूचित करता है कि क्या सबग्राफ कभी चेन तक पकड़ा गया है। `health` वर्तमान में `healthy` का मान ले सकता है यदि कोई त्रुटि नहीं हुई है, या `fail` यदि कोई त्रुटि थी जिसने सबग्राफ की प्रगति को रोक दिया था। इस स्थिति में, आप इस त्रुटि के विवरण के लिए `fatalError` फ़ील्ड की जांच कर सकते हैं। - -## होस्टेड सर्विस सबग्राफ आर्काइव नीति - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## सबग्राफ स्टूडियो सबग्राफ संग्रह नीति - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -इस नीति से प्रभावित प्रत्येक सबग्राफ के पास विचाराधीन संस्करण को वापस लाने का विकल्प है। diff --git a/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index c87da9f54cad..000000000000 --- a/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: सबग्राफ स्टूडियो में एक सबग्राफ तैनात करना ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- ग्राफ सीएलआई स्थापित करें (या तो यार्न या एनपीएम के साथ) -- सबग्राफ स्टूडियो में अपना सबग्राफ बनाएं -- सीएलआई से अपने खाते को प्रमाणित करें -- सबग्राफ स्टूडियो में एक सबग्राफ तैनात करना - -## ग्राफ सीएलआई स्थापित करना - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. 
- -**यार्न के साथ स्थापित करें:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**एनपीएम के साथ स्थापित करें:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## सबग्राफ स्टूडियो में अपना सबग्राफ बनाएं - -अपना वास्तविक सबग्राफ तैनात करने से पहले आपको [सबग्राफ स्टूडियो](https://thegraph.com/studio/) में एक सबग्राफ बनाना होगा। इस बारे में अधिक जानने के लिए हम अनुशंसा करते हैं कि आप हमारे [स्टूडियो दस्तावेज़ीकरण](/deploying/subgraph-studio) को पढ़ें। - -## अपना सबग्राफ इनिशियलाइज़ करें - -एक बार सबग्राफ स्टूडियो में आपका सबग्राफ बन जाने के बाद आप इस कमांड का उपयोग करके सबग्राफ कोड को इनिशियलाइज़ कर सकते हैं: - -```bash -graph init --studio -``` - -`` सबग्राफ स्टूडियो में आपके सबग्राफ विवरण पृष्ठ पर मूल्य पाया जा सकता है: - -![सबग्राफ स्टूडियो अक्सर पूछे जाने वाले प्रश्न](/img/doc-subgraph-slug.png) - -`graph init` चलाने के बाद, आपको कॉन्ट्रैक्ट का पता, नेटवर्क और ABI को इनपुट करने के लिए कहा जाएगा जिसे आप क्वेरी करना चाहते हैं। ऐसा करने से आपकी स्थानीय मशीन पर आपके सबग्राफ पर काम करना शुरू करने के लिए कुछ बुनियादी कोड के साथ एक नया फ़ोल्डर उत्पन्न होगा। फिर आप यह सुनिश्चित करने के लिए अपने सबग्राफ को अंतिम रूप दे सकते हैं कि यह अपेक्षा के अनुरूप काम करता है। - -## ग्राफ प्रमाणीकरण - -अपने सबग्राफ को सबग्राफ स्टूडियो में तैनात करने से पहले, आपको सीएलआई के भीतर अपने खाते में लॉगिन करना होगा। ऐसा करने के लिए, आपको अपनी तैनाती key की आवश्यकता होगी जिसे आप अपने "मेरे सबग्राफ" पृष्ठ या अपने सबग्राफ विवरण पृष्ठ पर पा सकते हैं। - -यहां वह आदेश है जिसे आपको सीएलआई से प्रमाणित करने के लिए उपयोग करने की आवश्यकता है: - -```bash -graph auth --studio -``` - -## सबग्राफ स्टूडियो में एक सबग्राफ तैनात करना - -एक बार जब आप तैयार हो जाते हैं, तो आप अपने सबग्राफ को सबग्राफ स्टूडियो में तैनात कर सकते हैं। ऐसा करने से आपका सबग्राफ विकेंद्रीकृत नेटवर्क पर प्रकाशित नहीं होगा, यह केवल इसे आपके स्टूडियो खाते में तैनात करेगा जहां आप इसका परीक्षण कर सकेंगे और मेटाडेटा अपडेट कर सकेंगे। - -यहां सीएलआई कमांड है जिसे आपको अपना सबग्राफ तैनात करने के लिए उपयोग करने की 
आवश्यकता है। - -```bash -graph deploy --studio -``` - -इस आदेश को चलाने के बाद, सीएलआई एक संस्करण लेबल के लिए पूछेगा, आप इसे जैसा चाहें नाम दे सकते हैं, आप `0.1` और `0.2` जैसे लेबल का उपयोग कर सकते हैं या अक्षरों का भी उपयोग कर सकते हैं जैसे `uniswap-v2-0.1`। वे लेबल ग्राफ़ एक्सप्लोरर में दिखाई देंगे और क्यूरेटर द्वारा यह तय करने के लिए उपयोग किया जा सकता है कि वे इस संस्करण पर संकेत देना चाहते हैं या नहीं, इसलिए उन्हें बुद्धिमानी से चुनें। - -एक बार तैनात करने के बाद, आप खेल के मैदान का उपयोग करके सबग्राफ स्टूडियो में अपने सबग्राफ का परीक्षण कर सकते हैं, यदि आवश्यक हो तो एक और संस्करण तैनात कर सकते हैं, मेटाडेटा को अपडेट कर सकते हैं और जब आप तैयार हों, तो अपने सबग्राफ को ग्राफ़ एक्सप्लोरर में प्रकाशित करें। diff --git a/website/pages/hi/deploying/hosted-service.mdx b/website/pages/hi/deploying/hosted-service.mdx deleted file mode 100644 index f2d769cb959b..000000000000 --- a/website/pages/hi/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: होस्टेड सेवा क्या है? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -विस्तृत सूची के लिए, [समर्थित नेटवर्क](/Developing/supported-networks/#hosted-service) देखें। - -## एक सबग्राफ बनाएं - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. 
Create a subgraph by passing in `graph init --product hosted-service` - -### एक मौजूदा कॉन्ट्रैक्ट से - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -इस मामले में `` आपका GitHub उपयोगकर्ता या संगठन का नाम है, `` आपके सबग्राफ का नाम है, और `` निर्देशिका का वैकल्पिक नाम है जहां `ग्राफ़ इनिट` उदाहरण सबग्राफ़ मेनिफ़ेस्ट डालेगा। `` आपके मौजूदा अनुबंध का पता है। `` उस नेटवर्क का नाम है जिस पर अनुबंध रहता है। `` अनुबंध ABI फ़ाइल का स्थानीय पथ है। **दोनों `--नेटवर्क` और `--अबी` वैकल्पिक हैं।** - -### एक उदाहरण सबग्राफ से - -दूसरा मोड `graph init` सपोर्ट करता है, एक उदाहरण सबग्राफ से एक नया प्रोजेक्ट बना रहा है। निम्न आदेश यह करता है: - -``` -graph init --from-example --product hosted-service / [] -``` - -उदाहरण सबग्राफ दानी ग्रांट द्वारा ग्रेविटी कॉन्ट्रैक्ट पर आधारित है जो उपयोगकर्ता अवतारों का प्रबंधन करता है और जब भी अवतार बनाए या अपडेट किए जाते हैं तो `NewGravatar` या `UpdateGravatar` ईवेंट उत्सर्जित करता है। सबग्राफ इन घटनाओं को ग्राफ़ नोड स्टोर में `Gravatar` संस्थाओं को लिखकर और सुनिश्चित करता है कि इन्हें घटनाओं के अनुसार अपडेट किया जाता है। बेहतर ढंग से समझने के लिए [सबग्राफ मेनिफ़ेस्ट](/developing/creating-a-subgraph#the-subgraph-manifest) पर जारी रखें कि आपके स्मार्ट कॉन्ट्रैक्ट से किन इवेंट्स पर ध्यान देना है, मैपिंग आदि। - -### From a Proxy Contract - -To build a subgraph tailored for 
monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -आप समर्थित नेटवर्क की सूची [यहां](/Developing/supported-networks) प्राप्त कर सकते हैं। diff --git a/website/pages/hi/deploying/subgraph-studio.mdx b/website/pages/hi/deploying/subgraph-studio.mdx deleted file mode 100644 index 68d04248a929..000000000000 --- a/website/pages/hi/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -आपके नए लॉन्चपैड 👩🏽‍🚀 में आपका स्वागत है - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- स्टूडियो यूआई के माध्यम से एक सबग्राफ बनाएं -- सीएलआई का उपयोग करके एक सबग्राफ तैनात करें -- स्टूडियो यूआई के साथ एक सबग्राफ प्रकाशित करें -- खेल के मैदान में इसका परीक्षण करें -- क्वेरी URL का उपयोग करके इसे स्टेजिंग में एकीकृत करें -- विशिष्ट सबग्राफ के लिए अपनी एपीआई keys बनाएं और प्रबंधित करें - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. 
- -सबग्राफ को क्वेरी करने से क्वेरी शुल्क उत्पन्न होता है, जिसका उपयोग ग्राफ़ नेटवर्क पर [इंडेक्सर्स](/network/indexing) को पुरस्कृत करने के लिए किया जाता है। यदि आप एक डैप डेवलपर या सबग्राफ डेवलपर हैं, तो स्टूडियो आपको अपने या अपने समुदाय के प्रश्नों को शक्ति देने के लिए बेहतर सबग्राफ बनाने में सक्षम करेगा। स्टूडियो में 5 मुख्य भाग होते हैं: - -- आपका उपयोगकर्ता खाता नियंत्रित करता है -- आपके द्वारा बनाए गए सबग्राफ की सूची -- किसी विशिष्ट सबग्राफ के प्रबंधन, विवरण देखने और स्थिति की कल्पना करने के लिए एक अनुभाग -- आपकी एपीआई keys को प्रबंधित करने के लिए एक अनुभाग जिसे आपको सबग्राफ से पूछताछ करने की आवश्यकता होगी -- आपकी बिलिंग प्रबंधित करने के लिए एक अनुभाग - -## अपना अकाउंट कैसे बनाये - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. एक बार जब आप साइन इन कर लेते हैं, तो आप अपने खाते के होम पेज पर अपनी विशिष्ट तैनात key देखेंगे। इससे आप या तो अपने सबग्राफ प्रकाशित कर सकेंगे या अपनी एपीआई key + बिलिंग प्रबंधित कर सकेंगे। आपके पास एक अनोखा तैनाती key होगी जिसे यदि आपको लगता है कि इससे समझौता किया गया है तो इसे फिर से उत्पन्न किया जा सकता है। - -## How to Create a Subgraph in Subgraph Studio - - - -## ग्राफ नेटवर्क के साथ सबग्राफ अनुकूलता - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- एक [समर्थित नेटवर्क](/developing/supported-networks) को इंडेक्स करें -- निम्नलिखित सुविधाओं में से किसी का उपयोग नहीं करना चाहिए: - - ipfs.cat & ipfs.map - - गैर-घातक त्रुटियाँ - - ग्राफ्टिंग - -अधिक सुविधाएँ & नेटवर्क को वृद्धिशील रूप से ग्राफ़ नेटवर्क में जोड़ा जाएगा। - -### सबग्राफ जीवनचक्र प्रवाह - -![सबग्राफ जीवनचक्र](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. 
This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## सबग्राफ स्टूडियो में अपने सबग्राफ का परीक्षण करना - -यदि आप अपने सबग्राफ को नेटवर्क पर प्रकाशित करने से पहले उसका परीक्षण करना चाहते हैं, तो आप इसे सबग्राफ **प्लेग्राउंड** में कर सकते हैं या अपने लॉग देख सकते हैं। सबग्राफ लॉग आपको बताएंगे **कहां** आपका सबग्राफ उस मामले में विफल रहता है जहां यह विफल होता है। - -## सबग्राफ स्टूडियो में अपना सबग्राफ प्रकाशित करें - -यहां ता सफलतापूर्वक आने के लिए बधाई! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -नीचे दिए गए वीडियो अवलोकन को भी देखें: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -इंडेक्सर्स को एक विशिष्ट ब्लॉक हैश के रूप में इंडेक्सिंग रिकॉर्ड का अनिवार्य प्रमाण जमा करना होगा। क्योंकि एक सबग्राफ प्रकाशित करना एक श्रृंखला पर की गई कार्रवाई है, याद रखें कि लेन-देन को पूरा होने में कुछ मिनट लग सकते हैं। कॉन्ट्रैक्ट को प्रकाशित करने के लिए आपके द्वारा उपयोग किया जाने वाला कोई भी एड्रेस भविष्य के संस्करणों को प्रकाशित करने में सक्षम होगा। बुद्धिमानी से चुनना! 
- -क्यूरेशन सिग्नल वाले सबग्राफ इंडेक्सर्स को दिखाए जाते हैं ताकि उन्हें विकेंद्रीकृत नेटवर्क पर इंडेक्स किया जा सके। आप एक लेन-देन में सबग्राफ और सिग्नल प्रकाशित कर सकते हैं, जो आपको सबग्राफ पर पहला क्यूरेशन सिग्नल मिंट करने की अनुमति देता है और गैस की लागत बचाता है। बाद में क्यूरेटर द्वारा प्रदान किए गए सिग्नल में अपना सिग्नल जोड़ने से, आपके सबग्राफ के पास अंततः प्रश्नों को प्रस्तुत करने का एक उच्च अवसर होगा। - -**अब जब आपने अपना सबग्राफ प्रकाशित कर दिया है, तो आइए देखें कि आप उन्हें नियमित रूप से कैसे प्रबंधित करेंगे।** ध्यान दें कि यदि आपका सबग्राफ सिंक करने में विफल रहा है तो आप उसे नेटवर्क पर प्रकाशित नहीं कर सकते। ऐसा आमतौर पर इसलिए होता है क्योंकि सबग्राफ में बग होते हैं - लॉग आपको बताएंगे कि वे मुद्दे कहां मौजूद हैं! - -## सीएलआई के साथ अपने सबग्राफ का संस्करण बनाना - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. 
- -कृपया ध्यान दें कि नेटवर्क पर एक सबग्राफ के नए संस्करण को प्रकाशित करने से जुड़ी लागतें हैं। लेन-देन शुल्क के अलावा, डेवलपर्स को ऑटो-माइग्रेटिंग सिग्नल पर क्यूरेशन टैक्स का एक हिस्सा भी देना चाहिए। यदि क्यूरेटर ने इस पर संकेत नहीं दिया है तो आप अपने सबग्राफ का नया संस्करण प्रकाशित नहीं कर सकते। क्यूरेशन के जोखिमों के बारे में अधिक जानकारी के लिए, कृपया [यहां](/network/curating) अधिक पढ़ें। - -### सबग्राफ संस्करणों का स्वचालित संग्रह - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![सबग्राफ स्टूडियो - असंग्रहित](/img/Unarchive.png) diff --git a/website/pages/hi/developing/creating-a-subgraph.mdx b/website/pages/hi/developing/creating-a-subgraph.mdx deleted file mode 100644 index 33836a604550..000000000000 --- a/website/pages/hi/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: सबग्राफ बनाना ---- - -एक सबग्राफ एक ब्लॉकचेन से डेटा निकालता है, इसे प्रोसेस करता है और इसे स्टोर करता है ताकि इसे ग्राफक्यूएल के माध्यम से आसानी से क्वेरी किया जा सके। - -![एक सबग्राफ को परिभाषित करना](/img/defining-a-subgraph.png) - -सबग्राफ की परिभाषा में कुछ फाइलें होती हैं: - -- `subgraph.yaml`: एक YAML फ़ाइल जिसमें सबग्राफ मेनिफ़ेस्ट होता है - -- `schema.graphql`: एक ग्राफक्यूएल स्कीमा जो परिभाषित करता है कि आपके सबग्राफ के लिए कौन सा डेटा संग्रहीत है, और इसे ग्राफक्यूएल के माध्यम से कैसे क्वेरी करें - -- `AssemblyScript मैपिंग`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) कोड जो इवेंट डेटा से आपके स्कीमा में परिभाषित इकाइयों में अनुवाद करता है (उदाहरण के लिए `mapping.ts` इस ट्यूटोरियल में) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API 
key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## . ग्राफ़ सीएलआई इनस्टॉल करें - -ग्राफ़ सीएलआई जावास्क्रिप्ट में लिखा गया है, और इसका उपयोग करने के लिए आपको या तो `yarn` या `npm` स्थापित करना होगा; यह माना जाता है कि आपके पास निम्नलिखित में yarn है। - -एक बार जब आपके पास `yarn` हो जाए, तो चलाकर ग्राफ़ सीएलआई स्थापित करें - -**Yarn के साथ स्थापित करें:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**एनपीएम के साथ स्थापित करें:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## एक मौजूदा कॉन्ट्रैक्ट से - -निम्न आदेश एक सबग्राफ बनाता है जो मौजूदा अनुबंध की सभी घटनाओं को अनुक्रमित करता है। यह एथरस्कैन से अनुबंध एबीआई लाने का प्रयास करता है और स्थानीय फ़ाइल पथ का अनुरोध करने के लिए वापस आ जाता है। यदि कोई वैकल्पिक तर्क गायब है, तो यह आपको एक संवादात्मक रूप में ले जाता है। - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` सबग्राफ स्टूडियो में आपके सबग्राफ की आईडी है, यह आपके सबग्राफ विवरण पृष्ठ पर पाया जा सकता है। - -## एक उदाहरण सबग्राफ से - -दूसरा मोड `graph init` सपोर्ट करता है, एक उदाहरण सबग्राफ से एक नया प्रोजेक्ट बना रहा है। निम्न आदेश यह करता है: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## मौजूदा सबग्राफ में नए डेटा स्रोत जोड़ें - -चूँकि `v0.31.0` `graph-cli` `graph add` कमांड के माध्यम से मौजूदा सबग्राफ में नए डेटा स्रोतों को जोड़ने का समर्थन करता है। - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -`--merge-entities` विकल्प यह बताता है कि डेवलपर `entity` और `event` नाम के विरोधों को कैसे हैंडल करना चाहता है: - -- अगर `सही`: नए `dataSource` को मौजूदा `eventHandlers` & `इकाइयां`। -- अगर `गलत`: एक नई इकाई & ईवेंट हैंडलर को `${dataSourceName}{EventName}` के साथ बनाया जाना चाहिए। - -संबंधित नेटवर्क के लिए `networks.json` को अनुबंध `पता` लिखा जाएगा। - -> **ध्यान दें:** इंटरैक्टिव क्ली का उपयोग करते समय, `ग्राफ़ इनिट` को सफलतापूर्वक चलाने के बाद, आपको एक नया `डेटा स्रोत` जोड़ने के लिए कहा जाएगा । - -## द सबग्राफ मेनिफेस्ट - -सबग्राफ मेनिफेस्ट `subgraph.yaml` आपके सबग्राफ इंडेक्स के स्मार्ट कॉन्ट्रैक्ट्स को परिभाषित करता है, इन कॉन्ट्रैक्ट्स से किन इवेंट्स पर ध्यान देना है, और इवेंट डेटा को उन संस्थाओं से कैसे मैप करना है जो ग्राफ़ नोड स्टोर करता है और क्वेरी करने की अनुमति देता है। सबग्राफ मेनिफ़ेस्ट के लिए पूर्ण विशिष्टता [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) पाई जा सकती है। - -उदाहरण के सबग्राफ के लिए `subgraph.yaml` है: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - 
data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -मेनिफेस्ट के लिए अद्यतन करने के लिए महत्वपूर्ण प्रविष्टियां हैं: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `विशेषताएं`: सभी प्रयुक्त [विशेषता](#experimental-features) नामों की सूची। - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: स्मार्ट अनुबंध का पता सबग्राफ स्रोत, और उपयोग करने के लिए स्मार्ट अनुबंध का ABI। पता वैकल्पिक है; इसे छोड़ने से सभी अनुबंधों से मिलान करने वाले ईवेंट को अनुक्रमित करने की अनुमति मिलती है। - -- `dataSources.source.startBlock`: उस ब्लॉक की वैकल्पिक संख्या जिससे डेटा स्रोत इंडेक्स करना शुरू करता है। ज्यादातर मामलों में, हम उस ब्लॉक का उपयोग करने का सुझाव देते हैं जिसमें अनुबंध बनाया गया था। - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: वे निकाय जो डेटा स्रोत स्टोर को लिखते हैं। प्रत्येक इकाई के लिए स्कीमा को schema.graphql फ़ाइल में परिभाषित किया गया है। - -- `dataSources.mapping.abis`: स्रोत अनुबंध के साथ-साथ मैपिंग के भीतर से आपके द्वारा इंटरैक्ट किए जाने वाले किसी भी अन्य स्मार्ट अनुबंध के लिए एक या अधिक नामित एबीआई फाइलें। - -- `dataSources.mapping.eventHandlers`: उन स्मार्ट कॉन्ट्रैक्ट इवेंट्स को सूचीबद्ध करता है जिन पर यह सबग्राफ प्रतिक्रिया करता है और मैपिंग में हैंडलर—./src/mapping.ts उदाहरण में — जो इन इवेंट्स को स्टोर में संस्थाओं में बदल देता है। - -- `dataSources.mapping.callHandlers`: उन स्मार्ट कॉन्ट्रैक्ट फ़ंक्शंस को सूचीबद्ध करता है जो इस सबग्राफ पर प्रतिक्रिया करता है और मैपिंग में हैंडलर जो इनपुट और आउटपुट को स्टोर में संस्थाओं में फ़ंक्शन कॉल में बदल देता है। - -- `dataSources.mapping.blockHandlers`: उन ब्लॉक्स को सूचीबद्ध करता है जिन पर यह सबग्राफ प्रतिक्रिया करता है और मैपिंग में हैंडलर्स को तब चलाया जाता है जब ब्लॉक को चेन से जोड़ा जाता है। फ़िल्टर के बिना, प्रत्येक ब्लॉक में ब्लॉक हैंडलर 
चलाया जाएगा। हैंडलर में `kind: call` के साथ एक `फ़िल्टर` फ़ील्ड जोड़कर एक वैकल्पिक कॉल-फ़िल्टर प्रदान किया जा सकता है। यह केवल हैंडलर चलाएगा यदि ब्लॉक में डेटा स्रोत अनुबंध के लिए कम से कम एक कॉल हो। - -एक सबग्राफ कई स्मार्ट कॉन्ट्रैक्ट्स से डेटा को इंडेक्स कर सकता है। प्रत्येक अनुबंध के लिए एक प्रविष्टि जोड़ें जिससे डेटा को `dataSources` सरणी में अनुक्रमित करने की आवश्यकता है। - -### Order of Triggering Handlers - -निम्नलिखित प्रक्रिया का उपयोग करके एक ब्लॉक के भीतर डेटा स्रोत के लिए ट्रिगर्स का आदेश दिया गया है: - -1. ईवेंट और कॉल ट्रिगर्स को पहले ब्लॉक के भीतर ट्रांजैक्शन इंडेक्स द्वारा ऑर्डर किया जाता है। -2. एक ही लेन-देन के भीतर ईवेंट और कॉल ट्रिगर्स को एक कन्वेंशन का उपयोग करके ऑर्डर किया जाता है: ईवेंट पहले ट्रिगर करता है फिर ट्रिगर्स को कॉल करता है, प्रत्येक प्रकार के ऑर्डर का सम्मान करते हुए उन्हें मेनिफेस्ट में परिभाषित किया जाता है। -3. ब्लॉक ट्रिगर इवेंट और कॉल ट्रिगर के बाद चलाए जाते हैं, जिस क्रम में उन्हें मेनिफेस्ट में परिभाषित किया गया है। - -ये आदेश नियम परिवर्तन के अधीन हैं। - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| संस्करण | रिलीज नोट्स | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### एबीआई प्राप्त करना - -एबीआई फाइल(फाइलों) को आपके अनुबंध(ओं) से मेल खाना चाहिए। ABI फ़ाइलें प्राप्त करने के कुछ तरीके हैं: - -- यदि आप अपना खुद का प्रोजेक्ट बना रहे हैं, तो आपके पास अपने सबसे मौजूदा एबीआई तक पहुंच होने की संभावना है। -- यदि आप किसी सार्वजनिक परियोजना के लिए सबग्राफ बना रहे हैं, तो आप उस परियोजना को अपने कंप्यूटर पर डाउनलोड कर सकते हैं और [`truffle संकलन का उपयोग करके ABI प्राप्त कर सकते हैं `](https://truffleframework.com/docs/truffle/overview) या संकलित करने के लिए solc का उपयोग करना। -- आप ABI को [Etherscan](https://etherscan.io/) पर भी ढूंढ सकते हैं, लेकिन यह हमेशा विश्वसनीय नहीं होता, क्योंकि वहां अपलोड किया गया ABI पुराना हो सकता है। सुनिश्चित करें कि आपके पास सही ABI है, अन्यथा आपका सबग्राफ चलाना विफल हो जाएगा। - -## ग्राफक्यूएल स्कीमा - -आपके सबग्राफ का स्कीमा फ़ाइल `schema.graphql` में है। ग्राफ़िकल स्कीमा को ग्राफ़िकल इंटरफ़ेस परिभाषा भाषा का उपयोग करके परिभाषित किया गया है। यदि आपने कभी भी ग्राफ़क्यूएल स्कीमा नहीं लिखा है, तो यह अनुशंसा की जाती है कि आप इस प्राइमर को ग्राफ़िकल टाइप सिस्टम पर देखें। ग्राफक्यूएल स्कीमा के लिए संदर्भ दस्तावेज [ग्राफक्यूएल एपीआई](/querying/graphql-api) अनुभाग में पाया जा सकता है। - -## संस्थाओं को परिभाषित करना - -संस्थाओं को परिभाषित करने से पहले, एक कदम पीछे हटना और यह सोचना महत्वपूर्ण है कि आपका डेटा कैसे 
संरचित और लिंक किया गया है। सबग्राफ स्कीमा में परिभाषित डेटा मॉडल और सबग्राफ द्वारा अनुक्रमित संस्थाओं के खिलाफ सभी प्रश्न किए जाएंगे। इस वजह से, सबग्राफ स्कीमा को इस तरह से परिभाषित करना अच्छा होता है जो आपके डैप की जरूरतों से मेल खाता हो। घटनाओं या कार्यों के बजाय "डेटा वाली वस्तुओं" के रूप में संस्थाओं की कल्पना करना उपयोगी हो सकता है। - -द ग्राफ़ के साथ, आप केवल `schema.graphql` में इकाई प्रकारों को परिभाषित करते हैं, और ग्राफ़ नोड उस इकाई प्रकार के एकल उदाहरणों और संग्रहों को क्वेरी करने के लिए शीर्ष स्तर के फ़ील्ड उत्पन्न करेगा। प्रत्येक प्रकार जो एक इकाई होना चाहिए उसे `@entity` निर्देश के साथ एनोटेट किया जाना आवश्यक है। डिफ़ॉल्ट रूप से, संस्थाएँ परिवर्तनशील होती हैं, जिसका अर्थ है कि मैपिंग मौजूदा संस्थाओं को लोड कर सकती है, उन्हें संशोधित कर सकती है और उस इकाई का एक नया संस्करण संग्रहीत कर सकती है। परिवर्तनशीलता एक मूल्य पर आती है, और इकाई प्रकारों के लिए जिनके लिए यह ज्ञात है कि उन्हें कभी भी संशोधित नहीं किया जाएगा, उदाहरण के लिए, क्योंकि उनमें केवल श्रृंखला से शब्दशः निकाले गए डेटा होते हैं, उन्हें `@entity के साथ अपरिवर्तनीय के रूप में चिह्नित करने की अनुशंसा की जाती है (अपरिवर्तनीय: सच)`। मैपिंग अपरिवर्तनीय संस्थाओं में तब तक परिवर्तन कर सकती है जब तक वे परिवर्तन उसी ब्लॉक में होते हैं जिसमें इकाई बनाई गई थी। अपरिवर्तनीय संस्थाएं लिखने और क्वेरी करने के लिए बहुत तेज़ हैं, और इसलिए जब भी संभव हो इसका उपयोग किया जाना चाहिए। - -### अच्छा उदाहरण - -नीचे दी गई `Gravatar` इकाई को Gravatar ऑब्जेक्ट के चारों ओर संरचित किया गया है और यह इस बात का एक अच्छा उदाहरण है कि किसी इकाई को कैसे परिभाषित किया जा सकता है। - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### खराब उदाहरण - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### वैकल्पिक और आवश्यक फ़ील्ड - -इकाई क्षेत्रों को आवश्यक या वैकल्पिक के रूप में परिभाषित किया जा सकता है। स्कीमा में आवश्यक फ़ील्ड `!` द्वारा दर्शाए गए हैं। यदि मैपिंग में कोई आवश्यक फ़ील्ड सेट नहीं है, तो फ़ील्ड की क्वेरी करते समय आपको यह त्रुटि प्राप्त होगी: - -``` -गैर-शून्य फ़ील्ड 'नाम' के लिए हल किया गया शून्य मान -``` - -प्रत्येक इकाई में एक `आईडी` फ़ील्ड होना चाहिए, जो कि `बाइट्स!` या `स्ट्रिंग!` प्रकार का होना चाहिए। आमतौर पर `बाइट्स!` का उपयोग करने की सिफारिश की जाती है, जब तक कि `आईडी` में मानव-पठनीय पाठ न हो, चूंकि `बाइट्स!` आईडी वाली इकाइयां लिखने में तेज़ होंगी और `स्ट्रिंग!` `id` वाले प्रश्नों के रूप में। `id` फ़ील्ड प्राथमिक कुंजी के रूप में कार्य करता है, और एक ही प्रकार की सभी संस्थाओं के बीच अद्वितीय होना चाहिए। ऐतिहासिक कारणों से, प्रकार `ID!` को भी स्वीकार किया जाता है और यह `स्ट्रिंग!` का पर्यायवाची है। - -कुछ इकाई प्रकारों के लिए `id` का निर्माण दो अन्य संस्थाओं की आईडी से किया जाता है; यह `concat` का उपयोग करके संभव है, उदाहरण के लिए, `let id = left.id.concat(right.id)` `left` की आईडी से आईडी बनाने के लिए और `दाएं`। इसी तरह, एक मौजूदा इकाई की आईडी से एक आईडी बनाने के लिए और एक काउंटर `गिनती`, `let id = left.id.concatI32(count)` का उपयोग किया जा सकता है। जब तक `बाएं` की लंबाई ऐसी सभी संस्थाओं के लिए समान है, उदाहरण के लिए, क्योंकि `left.id` एक `पता है, तब तक विशिष्ट आईडी बनाने के लिए संयोजन की गारंटी दी जाती है`। - -### बिल्ट-इन स्केलर प्रकार - -#### ग्राफक्यूएल समर्थित स्केलर्स - -हम अपने ग्राफक्यूएल एपीआई में निम्नलिखित स्केलर्स का समर्थन करते हैं: - -| प्रकार | विवरण | -| --- | --- | -| `Bytes` | बाइट सरणी, एक हेक्साडेसिमल स्ट्रिंग के रूप में दर्शाया गया है। आमतौर पर एथेरियम हैश और पतों के लिए उपयोग किया जाता है। | -| `String` | `स्ट्रिंग` मानों के लिए स्केलर। अशक्त वर्ण समर्थित नहीं हैं और स्वचालित रूप से हटा दिए जाते हैं। | -| `Boolean` | `boolean` मानों के लिए 
स्केलर। | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | बड़े पूर्णांक। एथेरियम के `uint32`, `int64`, `uint64`, ..., `uint256` प्रकारों के लिए उपयोग किया जाता है। नोट: `uint32` के नीचे सब कुछ, जैसे `int32`, `uint24` या `int8` को `i32` के रूप में दर्शाया गया है। | -| `BigDecimal` | `BigDecimal` उच्च परिशुद्धता दशमलव एक महत्व और एक प्रतिपादक के रूप में दर्शाया गया है। एक्सपोनेंट रेंज -6143 से +6144 तक है। 34 महत्वपूर्ण अंकों तक गोल। | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -आप स्कीमा के भीतर एनम भी बना सकते हैं। Enums में निम्न सिंटैक्स होता है: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -स्कीमा में एक बार एनम परिभाषित हो जाने के बाद, आप किसी इकाई पर एनम फ़ील्ड सेट करने के लिए एनम मान के स्ट्रिंग प्रतिनिधित्व का उपयोग कर सकते हैं। उदाहरण के लिए, आप पहले अपनी इकाई को परिभाषित करके और बाद में `entity.tokenStatus = "SecondOwner"` के साथ फ़ील्ड सेट करके `tokenStatus` को `SecondOwner` पर सेट कर सकते हैं। नीचे दिया गया उदाहरण दर्शाता है कि एनम फील्ड के साथ टोकन इकाई कैसी दिखेगी: - -एनम लिखने के बारे में अधिक विवरण [GraphQL प्रलेखन](https://graphql.org/learn/schema/) में पाया जा सकता है। - -#### निकाय संबंध - -एक इकाई का आपकी स्कीमा में एक या अधिक अन्य संस्थाओं से संबंध हो सकता है। आपके प्रश्नों में इन संबंधों का पता लगाया जा सकता है। ग्राफ़ में रिश्ते यूनिडायरेक्शनल हैं। रिश्ते के किसी भी "अंत" पर एक यूनिडायरेक्शनल रिश्ते को परिभाषित करके द्विपक्षीय संबंधों को अनुकरण करना संभव है। - -संस्थाओं पर संबंधों को किसी अन्य क्षेत्र की तरह ही परिभाषित किया जाता है सिवाय इसके कि निर्दिष्ट प्रकार किसी अन्य इकाई का है। - -#### एक-से-एक संबंध - -`TransactionReceipt` इकाई प्रकार के 
साथ एक वैकल्पिक एक-से-एक संबंध के साथ एक `लेन-देन` इकाई प्रकार परिभाषित करें: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### एक से कई रिश्ते - -टोकन निकाय प्रकार के साथ आवश्यक वन-टू-मैनी संबंध के साथ `TokenBalance` निकाय प्रकार परिभाषित करें: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### रिवर्स लुकअप - -उल्टे लुकअप को किसी इकाई पर `@derivedFrom` फ़ील्ड के माध्यम से परिभाषित किया जा सकता है। यह इकाई पर एक आभासी क्षेत्र बनाता है जिसे क्वेरी किया जा सकता है लेकिन मैपिंग एपीआई के माध्यम से मैन्युअल रूप से सेट नहीं किया जा सकता है। बल्कि, यह दूसरे निकाय पर परिभाषित संबंध से प्राप्त होता है। इस तरह के रिश्तों के लिए, रिश्ते के दोनों पक्षों को स्टोर करने के लिए यह शायद ही कभी समझ में आता है, और इंडेक्सिंग और क्वेरी प्रदर्शन दोनों बेहतर होंगे जब केवल एक पक्ष संग्रहीत किया जाता है और दूसरा व्युत्पन्न होता है। - -एक-से-अनेक संबंधों के लिए, संबंध को हमेशा 'एक' पक्ष में संग्रहीत किया जाना चाहिए, और 'अनेक' पक्ष हमेशा निकाला जाना चाहिए। संबंधों को इस तरह से संग्रहीत करने के बजाय, 'अनेक' पक्ष पर संस्थाओं की एक सरणी संग्रहीत करने के परिणामस्वरूप, सबग्राफ को अनुक्रमित करने और क्वेरी करने दोनों के लिए नाटकीय रूप से बेहतर प्रदर्शन होगा। सामान्य तौर पर, संस्थाओं की सरणियों को संग्रहीत करने से जितना संभव हो उतना बचा जाना चाहिए। - -#### उदाहरण - -`tokenBalances` फ़ील्ड प्राप्त करके हम टोकन से टोकन के लिए शेष राशि को सुलभ बना सकते हैं: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### अनेक-से-अनेक संबंध - -मैनी-टू-मैनी संबंधों के लिए, जैसे कि प्रत्येक उपयोगकर्ता किसी भी संख्या में संगठनों से संबंधित हो सकता है, सबसे सरल, लेकिन आम तौर पर सबसे अधिक प्रदर्शनकारी नहीं, संबंध को मॉडल करने का तरीका शामिल दो संस्थाओं में से प्रत्येक में एक सरणी के रूप में है। यदि संबंध सममित है, तो संबंध के केवल एक पक्ष को संग्रहित करने की आवश्यकता है और दूसरे पक्ष को व्युत्पन्न किया जा सकता है। - -#### उदाहरण - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -इस संबंध को संग्रहीत करने का एक अधिक प्रभावशाली तरीका एक मैपिंग टेबल के माध्यम से है जिसमें प्रत्येक `उपयोगकर्ता` / `संगठन` जोड़ी के लिए स्कीमा के साथ एक प्रविष्टि है - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -इस दृष्टिकोण के लिए आवश्यक है कि प्रश्नों को पुनः प्राप्त करने के लिए एक अतिरिक्त स्तर में उतरना पड़े, उदाहरण के लिए, उपयोगकर्ताओं के लिए संगठन: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -मैनी-टू-मैनी संबंधों को संग्रहीत करने के इस अधिक विस्तृत तरीके के परिणामस्वरूप सबग्राफ के लिए कम डेटा संग्रहीत होगा, और इसलिए एक सबग्राफ के लिए जो अक्सर इंडेक्स और क्वेरी के लिए नाटकीय रूप से तेज़ होता है। - -#### स्कीमा में टिप्पणियां जोड़ना - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## फुलटेक्स्ट सर्च फील्ड्स को परिभाषित करना - -पूर्ण पाठ खोज क्वेरी फ़िल्टर करती है और पाठ खोज इनपुट के आधार पर संस्थाओं को रैंक करती है। अनुक्रमित टेक्स्ट डेटा से तुलना करने से पहले क्वेरी टेक्स्ट इनपुट को तने में संसाधित करके पूर्ण टेक्स्ट क्वेरी समान शब्दों के लिए मैच वापस करने में सक्षम हैं। - -एक पूर्ण पाठ क्वेरी परिभाषा में क्वेरी का नाम, पाठ फ़ील्ड को संसाधित करने के लिए उपयोग किया जाने वाला भाषा शब्दकोश, परिणामों को क्रमबद्ध करने के लिए उपयोग किया जाने वाला रैंकिंग एल्गोरिदम और खोज में शामिल फ़ील्ड शामिल हैं। प्रत्येक पूर्ण-पाठ क्वेरी में कई फ़ील्ड शामिल हो सकते हैं, लेकिन सभी शामिल फ़ील्ड एक इकाई प्रकार से होने चाहिए। - -फुलटेक्स्ट क्वेरी जोड़ने के लिए, ग्राफक्यूएल स्कीमा में एक `_Schema_` टाइप को फुलटेक्स्ट डायरेक्टिव के साथ शामिल करें। - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -`नाम`, `विवरणबैंड` इकाइयों को फ़िल्टर करने के लिए उदाहरण `बैंडसर्च` फ़ील्ड का उपयोग प्रश्नों में किया जा सकता है >, और `जैव` फ़ील्ड। पूर्ण पाठ खोज API और अधिक उदाहरण उपयोग के विवरण के लिए [GraphQL API - क्वेरीज़](/querying/graphql-api#queries) पर जायें। - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[सुविधा प्रबंधन](#experimental-features):** `specVersion` `0.0.4 और आगे, fullTextSearch` को सबग्राफ मेनिफेस्ट में `फीचर्स` सेक्शन के तहत घोषित किया जाना चाहिए। - -### भाषाओं का समर्थन किया - -एक अलग भाषा का चयन एक निश्चित, हालांकि कभी-कभी सूक्ष्म, पूर्ण पाठ खोज एपीआई पर प्रभाव डालेगा। पूर्ण-पाठ क्वेरी फ़ील्ड द्वारा कवर किए गए फ़ील्ड को चुनी हुई भाषा के संदर्भ में जांचा जाता है, इसलिए विश्लेषण और खोज क्वेरी द्वारा उत्पन्न शब्दांश भाषा से भाषा में भिन्न होते हैं। उदाहरण के लिए: समर्थित तुर्की शब्दकोश "टोकन" का उपयोग करते समय "टोकन" के लिए स्टेम किया जाता है, जबकि निश्चित रूप से, अंग्रेजी शब्दकोश इसे "टोकन" के लिए स्टेम करेगा। - -समर्थित भाषा शब्दकोश: - -| कोड | शब्दकोष | -| ------ | ---------- | -| simple | आम | -| da | डेनिश | -| nl | डच | -| en | अंग्रेज़ी | -| fi | फिनिश | -| fr | फ्रेंच | -| de | जर्मन | -| hu | हंगेरी | -| it | इतालवी | -| no | नार्वेजियन | -| pt | पुर्तगाली | -| ro | रोमानियाई | -| ru | रूसी | -| es | स्पैनिश | -| sv | स्वीडिश | -| tr | तुर्की | - -### रैंकिंग एल्गोरिदम - -परिणाम ऑर्डर करने के लिए समर्थित एल्गोरिदम: - -| एल्गोरिदम | विवरण | -| ------------- | ----------------------------------------------------------------------------------------- | -| rank | परिणामों को व्यवस्थित करने के लिए पूर्ण पाठ क्वेरी की मिलान गुणवत्ता (0-1) का उपयोग करें। | -| proximityRank | रैंक के समान लेकिन इसमें मैचों की निकटता भी शामिल है। | - -## मैपिंग लिखना - -मैपिंग एक विशेष स्रोत से डेटा लेती है और इसे उन संस्थाओं में बदल देती है जो आपके स्कीमा के भीतर परिभाषित हैं। मैपिंग को [टाइपस्क्रिप्ट](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) के 
उपसमुच्चय में लिखा जाता है जिसे [असेंबलीस्क्रिप्ट](https://github.com/AssemblyScript/assemblyscript/wiki) कहा जाता है जिसे WASM ([WebAssembly](https://webassembly.org/)) में संकलित किया जा सकता है। असेंबलीस्क्रिप्ट सामान्य टाइपस्क्रिप्ट की तुलना में सख्त है, फिर भी एक परिचित सिंटैक्स प्रदान करता है। - -प्रत्येक ईवेंट हैंडलर के लिए जो `subgraph.yaml` `mapping.eventHandlers` के अंतर्गत परिभाषित है, उसी नाम का एक निर्यातित फ़ंक्शन बनाएं। प्रत्येक हैंडलर को `event` नामक एकल पैरामीटर को उस ईवेंट के नाम के अनुरूप एक प्रकार के साथ स्वीकार करना होगा जिसे संभाला जा रहा है। - -उदाहरण के सबग्राफ में, `src/mapping.ts` में `NewGravatar` और `UpdatedGravatar` ईवेंट के लिए हैंडलर शामिल हैं: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -पहला हैंडलर `NewGravatar` ईवेंट लेता है और `new Gravatar(event.params.id.toHex())` के साथ एक नया `Gravatar` इकाई बनाता है, जो पॉप्युलेट करता है संबंधित ईवेंट पैरामीटर का उपयोग करके निकाय फ़ील्ड. 
इस इकाई उदाहरण को `event.params.id.toHex()` के आईडी मान के साथ वेरिएबल `gravatar` द्वारा दर्शाया गया है। - -दूसरा हैंडलर ग्राफ़ नोड स्टोर से मौजूदा `Gravatar` को लोड करने का प्रयास करता है। यदि यह अभी तक मौजूद नहीं है, तो इसे ऑन-डिमांड बनाया गया है। इसके बाद इकाई को `gravatar.save()` का उपयोग करके स्टोर में वापस सहेजे जाने से पहले नए ईवेंट पैरामीटर से मेल खाने के लिए अपडेट किया जाता है। - -### नई संस्थाओं को बनाने के लिए अनुशंसित आईडी - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. 
- -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## कोड जनरेशन - -स्मार्ट कॉन्ट्रैक्ट्स, इवेंट्स और एंटिटीज के साथ काम करना आसान और टाइप-सेफ बनाने के लिए, ग्राफ सीएलआई सबग्राफ के ग्राफक्यूएल स्कीमा और डेटा स्रोतों में शामिल कॉन्ट्रैक्ट एबीआई से असेंबलीस्क्रिप्ट प्रकार उत्पन्न कर सकता है। - -इसके साथ किया जाता है - -```sh -graph codegen [--output-dir ] [] -``` - -लेकिन ज्यादातर मामलों में, सबग्राफ पहले से ही `package.json` के माध्यम से पूर्व-कॉन्फ़िगर किए जाते हैं ताकि आप इसे प्राप्त करने के लिए निम्न में से किसी एक को चला सकें: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -यह `subgraph.yaml` में वर्णित एबीआई फाइलों में प्रत्येक स्मार्ट अनुबंध के लिए एक असेंबलीस्क्रिप्ट क्लास उत्पन्न करेगा, जिससे आप इन अनुबंधों को मैपिंग में विशिष्ट पतों से बाँध सकते हैं और ब्लॉक होने के खिलाफ केवल-पढ़ने वाली अनुबंध विधियों को कॉल कर सकते हैं। संसाधित। यह प्रत्येक अनुबंध घटना के लिए एक वर्ग भी उत्पन्न करेगा, जो घटना के मापदंडों तक आसान पहुँच प्रदान करेगा, साथ ही ब्लॉक और लेन-देन की घटना से उत्पन्न हुआ। ये सभी प्रकार `//.ts` को लिखे जाते हैं। उदाहरण के सबग्राफ में, यह `उत्पन्न/ग्रेविटी/ग्रेविटी.टीएस` होगा, जिससे मैपिंग को इन प्रकारों को आयात करने की अनुमति मिलेगी। - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -इसके अलावा, सबग्राफ के ग्राफक्यूएल स्कीमा में प्रत्येक इकाई प्रकार के लिए एक वर्ग उत्पन्न होता है। ये क्लासेस टाइप-सेफ एंटिटी लोडिंग, एंटिटी फील्ड्स तक रीड और राइट एक्सेस के साथ-साथ स्टोर करने के लिए एंटिटीज लिखने के लिए `save()` मेथड मुहैया कराती हैं। सभी निकाय वर्ग `/schema.ts` पर लिखे गए हैं, जिससे मैपिंग उन्हें आयात करने की अनुमति देती है - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **ध्यान दें:** मेनिफेस्ट में शामिल ग्राफ़क्यूएल स्कीमा या एबीआई में हर बदलाव के बाद कोड जेनरेशन फिर से किया जाना चाहिए। सबग्राफ बनाने या तैनात करने से पहले इसे कम 
से कम एक बार अवश्य किया जाना चाहिए। - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## डेटा स्रोत टेम्पलेट्स - -ईवीएम-संगत स्मार्ट अनुबंधों में एक सामान्य पैटर्न रजिस्ट्री या फ़ैक्टरी अनुबंधों का उपयोग होता है, जहां एक अनुबंध अन्य अनुबंधों की मनमानी संख्या का निर्माण, प्रबंधन या संदर्भ करता है, जिनमें से प्रत्येक का अपना राज्य और ईवेंट होता है। - -इन उप-अनुबंधों के पते पहले से ज्ञात हो सकते हैं या नहीं भी हो सकते हैं और इनमें से कई अनुबंध समय के साथ बनाए और/या जोड़े जा सकते हैं। यही कारण है कि, ऐसे मामलों में, एक डेटा स्रोत या डेटा स्रोतों की एक निश्चित संख्या को परिभाषित करना असंभव है और अधिक गतिशील दृष्टिकोण की आवश्यकता है: _डेटा स्रोत टेम्पलेट्स_। - -### मुख्य अनुबंध के लिए डेटा स्रोत - -सबसे पहले, आप मुख्य अनुबंध के लिए एक नियमित डेटा स्रोत परिभाषित करते हैं। नीचे दिया गया स्निपेट [Uniswap](https://uniswap.org) एक्सचेंज फैक्ट्री अनुबंध के लिए एक सरलीकृत उदाहरण डेटा स्रोत दिखाता है। `NewExchange(address,address)` ईवेंट हैंडलर पर ध्यान दें। यह तब उत्सर्जित होता है जब फ़ैक्टरी अनुबंध द्वारा एक नया एक्सचेंज अनुबंध ऑन-चेन बनाया जाता है। - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### गतिशील रूप से बनाए गए अनुबंधों के लिए डेटा स्रोत टेम्प्लेट - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. 
Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### डेटा स्रोत टेम्प्लेट को इंस्टेंट करना - -अंतिम चरण में, आप किसी एक टेम्प्लेट से डायनेमिक डेटा स्रोत इंस्टेंस बनाने के लिए अपनी मुख्य अनुबंध मैपिंग को अपडेट करते हैं। इस उदाहरण में, आप `Exchange` टेम्पलेट को इम्पोर्ट करने के लिए मुख्य कॉन्ट्रैक्ट मैपिंग को बदलेंगे और नए एक्सचेंज कॉन्ट्रैक्ट को इंडेक्स करना शुरू करने के लिए उस पर `Exchange.create(address)` मेथड को कॉल करेंगे। - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **ध्यान दें:** एक नया डेटा स्रोत केवल उस ब्लॉक के लिए कॉल और ईवेंट को प्रोसेस करेगा जिसमें इसे बनाया गया था और सभी बाद के ब्लॉक, लेकिन ऐतिहासिक डेटा, यानी डेटा को प्रोसेस नहीं करेगा जो पिछले ब्लॉकों में निहित है। -> -> यदि पिछले ब्लॉक में नए डेटा स्रोत के लिए प्रासंगिक डेटा है, तो उस डेटा को अनुबंध की वर्तमान स्थिति को पढ़कर और नए डेटा स्रोत के निर्माण के समय उस स्थिति का प्रतिनिधित्व करने वाली संस्थाओं का निर्माण करना सबसे 
अच्छा है। - -### डेटा स्रोत प्रसंग - -डेटा स्रोत संदर्भ किसी टेम्प्लेट को इंस्टेंट करते समय अतिरिक्त कॉन्फ़िगरेशन पास करने की अनुमति देते हैं। हमारे उदाहरण में, मान लें कि एक्सचेंज एक विशेष ट्रेडिंग जोड़ी से जुड़े हैं, जो `NewExchange` इवेंट में शामिल है। उस जानकारी को तात्कालिक डेटा स्रोत में पास किया जा सकता है, जैसे: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -`Exchange` टेम्प्लेट की मैपिंग के अंदर, संदर्भ तक पहुँचा जा सकता है: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -सभी प्रकार के मूल्य के लिए `setString` और `getString` जैसे सेटर्स और गेटर्स हैं। - -## Start Blocks - -`StartBlock` एक वैकल्पिक सेटिंग है जो आपको यह परिभाषित करने की अनुमति देती है कि श्रृंखला में किस ब्लॉक से डेटा स्रोत अनुक्रमण शुरू करेगा। स्टार्ट ब्लॉक सेट करने से डेटा स्रोत संभावित रूप से अप्रासंगिक लाखों ब्लॉक को छोड़ देता है। आमतौर पर, एक सबग्राफ डेवलपर `स्टार्टब्लॉक` को उस ब्लॉक पर सेट करेगा जिसमें डेटा स्रोत का स्मार्ट अनुबंध बनाया गया था। - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **ध्यान दें:** इथरस्कैन पर अनुबंध निर्माण ब्लॉक को जल्दी से देखा जा सकता है: -> -> 1. खोज बार में उसका पता दर्ज करके अनुबंध की खोज करें। -> 2. 
`अनुबंध निर्माता` अनुभाग में निर्माण लेनदेन हैश पर क्लिक करें। -> 3. लेन-देन विवरण पृष्ठ लोड करें जहां आपको उस अनुबंध के लिए प्रारंभ ब्लॉक मिलेगा। - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
- -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## कॉल हैंडलर्स - -जबकि घटनाएँ एक अनुबंध की स्थिति में प्रासंगिक परिवर्तन एकत्र करने का एक प्रभावी तरीका प्रदान करती हैं, कई अनुबंध गैस लागतों को अनुकूलित करने के लिए लॉग उत्पन्न करने से बचते हैं। इन मामलों में, एक सबग्राफ डेटा स्रोत अनुबंध में किए गए कॉलों की सदस्यता ले सकता है। यह फ़ंक्शन सिग्नेचर और मैपिंग हैंडलर को संदर्भित करने वाले कॉल हैंडलर को परिभाषित करके प्राप्त किया जाता है जो इस फ़ंक्शन को कॉल संसाधित करेगा। इन कॉल्स को प्रोसेस करने के लिए, मैपिंग हैंडलर को `ethereum.Call` एक आर्गुमेंट के रूप में प्राप्त होगा, जिसमें टाइप किए गए इनपुट और कॉल के आउटपुट होंगे। लेन-देन की कॉल श्रृंखला में किसी भी गहराई पर किए गए कॉल मैपिंग को ट्रिगर करेंगे, जिससे प्रॉक्सी अनुबंधों के माध्यम से डेटा स्रोत अनुबंध के साथ गतिविधि को कैप्चर किया जा सकेगा। - -कॉल हैंडलर केवल दो मामलों में से एक में ट्रिगर होंगे: जब निर्दिष्ट फ़ंक्शन को अनुबंध के अलावा किसी अन्य खाते द्वारा कॉल किया जाता है या जब इसे सॉलिडिटी में बाहरी के रूप में चिह्नित किया जाता है और उसी अनुबंध में किसी अन्य फ़ंक्शन के भाग के रूप में कॉल किया जाता है। - -> **ध्यान दें:** कॉल हैंडलर वर्तमान में समता अनुरेखण API पर निर्भर करते हैं। बीएनबी चेन और आर्बिट्रम जैसे कुछ नेटवर्क इस एपीआई का समर्थन नहीं करते हैं। यदि इन नेटवर्कों में से किसी एक सबग्राफ इंडेक्सिंग में एक या अधिक कॉल हैंडलर हैं, तो यह सिंक करना शुरू नहीं करेगा। सबग्राफ 
डेवलपर्स को इसके बजाय इवेंट हैंडलर्स का उपयोग करना चाहिए। ये कॉल हैंडलर्स की तुलना में कहीं अधिक प्रदर्शनकारी हैं, और प्रत्येक ईवीएम नेटवर्क पर समर्थित हैं। - -### कॉल हैंडलर को परिभाषित करना - -अपने मेनिफेस्ट में कॉल हैंडलर को परिभाषित करने के लिए, आप जिस डेटा स्रोत की सदस्यता लेना चाहते हैं, उसके तहत बस एक `callHandlers` सरणी जोड़ें। - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`फ़ंक्शन` कॉल को फ़िल्टर करने के लिए सामान्यीकृत फ़ंक्शन सिग्नेचर है। `हैंडलर` गुण आपकी मैपिंग में उस फ़ंक्शन का नाम है जिसे आप तब निष्पादित करना चाहेंगे जब डेटा स्रोत अनुबंध में लक्ष्य फ़ंक्शन को कॉल किया जाता है। - -### मानचित्रण समारोह - -प्रत्येक कॉल हैंडलर एक पैरामीटर लेता है जिसमें कॉल किए गए फ़ंक्शन के नाम से संबंधित प्रकार होता है। उपरोक्त उदाहरण सबग्राफ में, मैपिंग में एक हैंडलर होता है जब `createGravatar` फ़ंक्शन को कॉल किया जाता है और `CreateGravatarCall` पैरामीटर को तर्क के रूप में प्राप्त होता है: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. 
The `CreateGravatarCall` type is generated for you when you run `graph codegen`. - -## ब्लॉक हैंडलर - -Contract events या function calls की सदस्यता लेने के अलावा, एक subgraph अपने data को update करना चाह सकता है क्योंकि chain में नए blocks जोड़े जाते हैं। इसे प्राप्त करने के लिए एक subgraph every block के बाद या pre-defined filter से match होन वाले block के बाद एक function चला सकता है। - -### समर्थित फ़िल्टर - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_परिभाषित हैंडलर को प्रत्येक ब्लॉक के लिए एक बार कॉल किया जाएगा जिसमें अनुबंध (डेटा स्रोत) के लिए कॉल शामिल है जिसे हैंडलर के तहत परिभाषित किया गया है।_ - -> **ध्यान दें:** `कॉल` फ़िल्टर फ़िलहाल पैरिटी ट्रेसिंग API पर निर्भर करता है। बीएनबी चेन और आर्बिट्रम जैसे कुछ नेटवर्क इस एपीआई का समर्थन नहीं करते हैं। अगर इन नेटवर्कों में से किसी एक सबग्राफ इंडेक्सिंग में `कॉल` फिल्टर के साथ एक या अधिक ब्लॉक हैंडलर हैं, तो यह सिंक करना शुरू नहीं करेगा। - -ब्लॉक हैंडलर के लिए फ़िल्टर की अनुपस्थिति सुनिश्चित करेगी कि हैंडलर को प्रत्येक ब्लॉक कहा जाता है। डेटा स्रोत में प्रत्येक फ़िल्टर प्रकार के लिए केवल एक ब्लॉक हैंडलर हो सकता है। - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. 
This configuration allows the subgraph to perform specific operations at regular block intervals. - -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### मानचित्रण समारोह - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## अनाम घटनाएँ - -यदि आपको अनाम घटनाओं को सॉलिडिटी में संसाधित करने की आवश्यकता है, तो इसे घटना के विषय 0 प्रदान करके प्राप्त किया जा सकता है, जैसा कि उदाहरण में है: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -एक घटना तभी शुरू होगी जब दोनों हस्ताक्षर और विषय 0 मेल खाते हों। डिफ़ॉल्ट रूप से, `topic0` इवेंट सिग्नेचर के हैश के बराबर है। - -## इवेंट हैंडलर में लेनदेन रसीदें - -`specVersion` `0.0.5` और `apiVersion` `0.0.7` से शुरू होकर, ईवेंट हैंडलर्स के पास रसीद तक ​​पहुंच हो सकती है लेनदेन जो उन्हें उत्सर्जित करता है। - -ऐसा करने के लिए, इवेंट हैंडलर को सबग्राफ मेनिफेस्ट में नई `receipt: true` कुंजी के साथ घोषित किया जाना चाहिए, जो 
वैकल्पिक है और डिफ़ॉल्ट रूप से गलत है। - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -हैंडलर फ़ंक्शन के अंदर, रसीद को `Event.receipt` फ़ील्ड में एक्सेस किया जा सकता है। जब `receipt` कुंजी को `गलत` पर सेट किया जाता है या मेनिफ़ेस्ट में छोड़ दिया जाता है, तो इसके बजाय एक `शून्य` मान लौटाया जाएगा। - -## प्रायोगिक विशेषताएं - -`specVersion` `0.0.4` से शुरू होकर, सबग्राफ सुविधाओं को `सुविधाओं` अनुभाग में मेनिफेस्ट फ़ाइल के शीर्ष स्तर पर उनके का उपयोग करके स्पष्ट रूप से घोषित किया जाना चाहिए `camelCase` नाम, जैसा कि नीचे दी गई तालिका में सूचीबद्ध है: - -| विशेषता | नाम | -| -------------------------------------------------- | ---------------- | -| [गैर-घातक त्रुटियाँ](#non-fatal-errors) | `nonFatalErrors` | -| [पूरा पाठ खोजें](#defining-fulltext-search-fields) | `fullTextSearch` | -| [ग्राफ्टिंग](#grafting-onto-existing-subgraphs) | `grafting` | - -उदाहरण के लिए, यदि कोई सबग्राफ **पूर्ण-पाठ खोज** और **गैर-घातक त्रुटियां** सुविधाओं का उपयोग करता है, तो मेनिफेस्ट में `सुविधाएं` फ़ील्ड होना चाहिए: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -ध्यान दें कि किसी सुविधा को घोषित किए बिना उसका उपयोग करने से सबग्राफ परिनियोजन के दौरान **सत्यापन त्रुटि** उत्पन्न होगी, लेकिन यदि सुविधा घोषित की जाती है लेकिन उसका उपयोग नहीं किया जाता है, तो कोई त्रुटि उत्पन्न नहीं होगी. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! 
- timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### गैर-घातक त्रुटियाँ - -पहले से सिंक किए गए सबग्राफ पर इंडेक्सिंग त्रुटियां, डिफ़ॉल्ट रूप से, सबग्राफ को विफल कर देंगी और सिंक करना बंद कर देंगी। सबग्राफ को वैकल्पिक रूप से त्रुटियों की उपस्थिति में समन्वयन जारी रखने के लिए कॉन्फ़िगर किया जा सकता है, हैंडलर द्वारा किए गए परिवर्तनों को अनदेखा करके त्रुटि उत्पन्न हुई। यह सबग्राफ लेखकों को अपने सबग्राफ को ठीक करने का समय देता है, जबकि नवीनतम ब्लॉक के विरुद्ध प्रश्नों को जारी रखा जाता है, हालांकि त्रुटि के कारण बग के कारण परिणाम असंगत हो सकते हैं। ध्यान दें कि कुछ त्रुटियाँ अभी भी हमेशा घातक होती हैं। गैर-घातक होने के लिए, त्रुटि नियतात्मक होने के लिए जानी जानी चाहिए। - -> **नोट:** ग्राफ़ नेटवर्क अभी तक गैर-घातक त्रुटियों का समर्थन नहीं करता है, और डेवलपर्स को स्टूडियो के माध्यम से नेटवर्क पर उस कार्यक्षमता का उपयोग करके सबग्राफ तैनात नहीं करना चाहिए। - -गैर-घातक त्रुटियों को सक्षम करने के लिए सबग्राफ मेनिफ़ेस्ट पर निम्न फ़ीचर फ़्लैग सेट करने की आवश्यकता होती है: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -क्वेरी को `subgraphError` तर्क के माध्यम से संभावित विसंगतियों वाले डेटा को क्वेरी करने के लिए भी ऑप्ट-इन करना चाहिए। यह जाँचने के लिए `_meta` को क्वेरी करने की भी सिफारिश की जाती है कि क्या सबग्राफ ने त्रुटियों को छोड़ दिया है, जैसा कि उदाहरण में दिया गया है: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -यदि सबग्राफ में कोई त्रुटि आती है, तो वह क्वेरी `"indexing_error"` संदेश के साथ डेटा और ग्राफ़कल त्रुटि दोनों वापस कर देगी, जैसा कि इस उदाहरण प्रतिक्रिया में है: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### मौजूदा सबग्राफ पर ग्राफ्टिंग - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -जब `subgraph.yaml` में सबग्राफ मेनिफेस्ट में `graft` ब्लॉक होता है तो एक सबग्राफ को बेस सबग्राफ पर ग्राफ्ट किया जाता है: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -जब एक सबग्राफ जिसके मेनिफ़ेस्ट में `graft` ब्लॉक होता है, तैनात किया जाता है, तो ग्राफ़ नोड दिए गए `ब्लॉक` सहित `base` सबग्राफ़ के डेटा को कॉपी करेगा और उसके बाद उस ब्लॉक से नए सबग्राफ को अनुक्रमणित करना जारी रखें। बेस सबग्राफ लक्ष्य ग्राफ़ नोड उदाहरण पर मौजूद होना चाहिए और कम से कम दिए गए ब्लॉक तक अनुक्रमित होना चाहिए। इस प्रतिबंध के कारण, ग्राफ्टिंग का उपयोग केवल विकास के दौरान या किसी आपात स्थिति के दौरान समकक्ष गैर-ग्राफ्टेड सबग्राफ के उत्पादन में तेजी लाने के लिए किया जाना चाहिए। - -क्योंकि आधार डेटा को अनुक्रमित करने के बजाय प्रतियों को ग्राफ्ट करना, स्क्रैच से अनुक्रमणित करने की तुलना में सबग्राफ को वांछित ब्लॉक में प्राप्त करना बहुत तेज है, हालांकि बहुत बड़े सबग्राफ के लिए प्रारंभिक डेटा कॉपी में अभी भी कई घंटे लग सकते हैं। जबकि ग्राफ्टेड सबग्राफ को इनिशियलाइज़ किया जा रहा है, ग्राफ़ नोड उन एंटिटी प्रकारों के बारे में जानकारी लॉग करेगा जो पहले ही कॉपी किए जा चुके हैं। - -ग्राफ्टेड सबग्राफ एक ग्राफक्यूएल स्कीमा का उपयोग कर सकता है जो बेस सबग्राफ के समान नहीं है, लेकिन इसके साथ केवल संगत है। यह अपने आप में एक मान्य सबग्राफ स्कीमा होना चाहिए, लेकिन निम्नलिखित तरीकों से बेस सबग्राफ के स्कीमा से विचलित हो सकता है: - -- यह इकाई प्रकार जोड़ता या हटाता है -- यह इकाई प्रकारों से विशेषताएँ निकालता है -- यह प्रभावहीन 
गुणों को इकाई प्रकारों में जोड़ता है| -- यह प्रभाव वाले गुणों को प्रभावहीन गुणों में बदल देता है| -- यह इनम्स में महत्व देता है| -- यह इंटरफेस जोड़ता या हटाता है| -- यह कि, किन इकाई प्रकारों के लिए इंटरफ़ेस लागू होगा, इसे बदल देता है| - -> **[सुविधा प्रबंधन](#experimental-features):** `ग्राफ्टिंग` को `विशेषताओं` के अंतर्गत घोषित किया जाना चाहिए कोड सबग्राफ मेनिफेस्ट में। - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> यह ऑफ-चेन डेटा के नियतात्मक अनुक्रमण के साथ-साथ स्वैच्छिक HTTP-स्रोत डेटा के संभावित परिचय के लिए आधार भी देता है। - -### अवलोकन - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> यह मौजूदा `ipfs.cat` API को प्रतिस्थापित करता है - -### Upgrade guide - -#### `ग्राफ़-टीएस` और `ग्राफ़-क्ली` अपडेट करें - -फ़ाइल डेटा स्रोतों के लिए ग्राफ़-टीएस >=0.29.0 और ग्राफ़-क्ली >=0.33.1 की आवश्यकता होती है - -#### एक नया इकाई प्रकार जोड़ें जो फ़ाइलें मिलने पर अपडेट किया जाएगा - -फ़ाइल डेटा स्रोत श्रृंखला-आधारित संस्थाओं तक पहुँच या अद्यतन नहीं कर सकते हैं, लेकिन फ़ाइल विशिष्ट संस्थाओं को अद्यतन करना चाहिए। - -इसका मतलब हो सकता है कि फ़ील्ड को मौजूदा इकाइयों से अलग-अलग इकाइयों में विभाजित करना, एक साथ जुड़े हुए। - -मूल संयुक्त इकाई: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! 
- updatedAtTimestamp: BigInt - owner: User! -} -``` - -नई, विभाजित इकाई: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -यदि पैरेंट इकाई और परिणामी फ़ाइल डेटा स्रोत इकाई के बीच संबंध 1:1 है, तो सबसे सरल पैटर्न मूल इकाई को लुकअप के रूप में IPFS CID का उपयोग करके परिणामी फ़ाइल इकाई से लिंक करना है। यदि आपको अपनी नई फ़ाइल-आधारित संस्थाओं को मॉडलिंग करने में कठिनाई हो रही है, तो डिस्कॉर्ड पर संपर्क करें! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -यह वह डेटा स्रोत है जो ब्याज की फ़ाइल की पहचान होने पर उत्पन्न होगा। - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> वर्तमान में `abis` की आवश्यकता है, हालांकि फ़ाइल डेटा स्रोतों के भीतर से अनुबंधों को कॉल करना संभव नहीं है - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### फ़ाइलों को संसाधित करने के लिए एक नया हैंडलर बनाएँ - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -पढ़ने योग्य स्ट्रिंग के रूप में फ़ाइल की CID को `dataSource` के माध्यम से निम्नानुसार एक्सेस किया जा सकता है: - -```typescript -const cid = dataSource.stringParam() -``` - -उदाहरण हैंडलर: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### आवश्यक होने पर फ़ाइल डेटा स्रोत स्पॉन करें - -अब आप चेन-आधारित हैंडलर के निष्पादन के दौरान फ़ाइल डेटा स्रोत बना सकते हैं: - -- ऑटो-जेनरेट किए गए `टेम्पलेट्स` से टेम्प्लेट आयात करें -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -उदाहरण: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -यह उदाहरण CID का उपयोग पैरेंट `Token` इकाई और परिणामी `TokenMetadata` इकाई के बीच लुकअप के रूप में कर रहा है। - -> पहले, यह वह बिंदु है जिस पर एक सबग्राफ डेवलपर फ़ाइल लाने के लिए `ipfs.cat(CID)` को कॉल करता था - -बधाई हो, आप फ़ाइल डेटा स्रोतों का उपयोग कर रहे हैं! 
- -#### अपने उप-अनुच्छेदों को तैनात करना - -अब आप अपने सबग्राफ को किसी भी ग्राफ़ नोड >=v0.30.0-rc.0 पर `बना` और `तैनाती` कर सकते हैं। - -#### परिसीमन - -फ़ाइल डेटा स्रोत हैंडलर और संस्थाएँ अन्य सबग्राफ संस्थाओं से अलग हैं, यह सुनिश्चित करते हुए कि वे निष्पादित होने पर नियतात्मक हैं, और श्रृंखला-आधारित डेटा स्रोतों का कोई संदूषण सुनिश्चित नहीं करते हैं। विस्तार से: - -- फ़ाइल डेटा स्रोतों द्वारा बनाई गई इकाइयाँ अपरिवर्तनीय हैं, और इन्हें अद्यतन नहीं किया जा सकता है -- फ़ाइल डेटा स्रोत हैंडलर अन्य फ़ाइल डेटा स्रोतों से संस्थाओं तक नहीं पहुँच सकते -- फ़ाइल डेटा स्रोतों से जुड़ी संस्थाओं को चेन-आधारित हैंडलर द्वारा एक्सेस नहीं किया जा सकता है - -> हालांकि यह बाधा अधिकांश उपयोग-मामलों के लिए समस्याग्रस्त नहीं होनी चाहिए, यह कुछ के लिए जटिलता का परिचय दे सकती है। यदि आपको अपने फ़ाइल-आधारित डेटा को सबग्राफ में मॉडलिंग करने में समस्या आ रही है, तो कृपया डिस्कॉर्ड के माध्यम से संपर्क करें! - -इसके अतिरिक्त, फ़ाइल डेटा स्रोत से डेटा स्रोत बनाना संभव नहीं है, चाहे वह ऑनचेन डेटा स्रोत हो या अन्य फ़ाइल डेटा स्रोत। भविष्य में यह प्रतिबंध हटाया जा सकता है। - -#### सर्वोत्तम प्रथाएं - -यदि आप NFT मेटाडेटा को संबंधित टोकन से लिंक कर रहे हैं, तो टोकन इकाई से मेटाडेटा इकाई को संदर्भित करने के लिए मेटाडेटा के IPFS हैश का उपयोग करें। एक आईडी के रूप में IPFS हैश का उपयोग करके मेटाडेटा इकाई को सहेजें। - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. 
यदि आपके पास ऐसे निकाय हैं जो कई बार रीफ़्रेश किए गए हैं, तो IPFS हैश और इकाई आईडी का उपयोग करके अद्वितीय फ़ाइल-आधारित निकाय बनाएँ, और श्रृंखला-आधारित इकाई में एक व्युत्पन्न क्षेत्र का उपयोग करके उनका संदर्भ लें।
a/website/pages/hi/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/hi/mips-faqs.mdx b/website/pages/hi/mips-faqs.mdx deleted file mode 100644 index de45376c7e5c..000000000000 --- a/website/pages/hi/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: एमआईपी अक्सर पूछे जाने वाले प्रश्न ---- - -## परिचय - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. 
- -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. - -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! 
Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. 
However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? - -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. 
More information on this will be [shared in this notion page soon](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059).
diff --git a/website/pages/hi/querying/_meta.js b/website/pages/hi/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/hi/querying/_meta.js +++ b/website/pages/hi/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/hi/querying/graph-client/_meta.js b/website/pages/hi/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/hi/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/it/_meta.js b/website/pages/it/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/it/_meta.js +++ b/website/pages/it/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/it/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/it/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 6a76b4352426..000000000000 --- a/website/pages/it/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Distribuzione di un subgraph al Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. 
You will need a [GitHub](https://github.com/) account for that; if you don't have one, you need to create that first.
- -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -Si distribuisce il subgraph eseguendo `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -Lo stato del subgraph passa a `Synced` una volta che il Graph Node ha estratto tutti i dati dai blocchi storici. Il Graph Node continuerà a ispezionare i blocchi del subgraph mentre questi blocchi vengono estratti. - -## Ridistribuzione di un subgraph - -Quando si apportano modifiche alla definizione del subgraph, ad esempio per risolvere un problema nelle mappature delle entità, eseguire nuovamente il comando `yarn deploy` di cui sopra per distribuire la versione aggiornata del subgraph. Ogni aggiornamento di un subgraph richiede che il Graph Node reindicizzi l'intero subgraph, sempre a partire dal blocco genesis. - -Se il subgraph distribuito in precedenza è ancora in stato `Syncing`, verrà immediatamente sostituito con la nuova versione distribuita. Se il subgraph distribuito in precedenza è già completamente sincronizzato, Graph Node segnerà la nuova versione distribuita come `Pending Version`, sincronizzarla in background e sostituire la versione attualmente distribuita con quella nuova solo al termine della sincronizzazione della nuova versione. 
In questo modo si ha a disposizione un subgraph su cui lavorare durante la sincronizzazione della nuova versione. - -## Distribuzione del subgraph su più reti - -In alcuni casi, si desidera distribuire lo stesso subgraph su più reti senza duplicare tutto il suo codice. Il problema principale è che gli indirizzi dei contratti su queste reti sono diversi. - -### Utilizzo di graph-cli - -Entrambi `graph build` (da `v0.29.0`) e `graph deploy` (da `v0.32.0`) accettano due nuove opzioni: - -```sh -Opzioni: - - ... - --network Configurazione di rete da utilizzare dal file di configurazione delle reti - --network-file Percorso del file di configurazione della rete (predefinito: "./networks.json") -``` - -È possibile usare l'opzione `--network` per specificare una configurazione di rete da un file standard `json` (predefinito a `networks.json`) per aggiornare facilmente il subgraph durante lo sviluppo. - -**Nota:** Il comando `init` ora genera automaticamente un `networks.json` basato sulle informazioni fornite. Sarà quindi possibile aggiornare le reti esistenti o aggiungerne altre. - -Se non si dispone di un file `networks.json`, è necessario crearne uno manualmente con la seguente struttura: - -```json -{ - "network1": { // il nome della rete - "dataSource1": { // il nome del dataSource - "address": "0xabc...", // l'indirizzo del contratto (opzionale) - "startBlock": 123456 // il startBlock (opzionale) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Nota:** Non è necessario specificare alcuno dei `template` (se ne esistono) nel file di configurazione, ma solo i `dataSource`. Se ci sono `templates` dichiarati nel file `subgraph.yaml`, la loro rete sarà automaticamente aggiornata a quella specificata con l'opzione `--network`. 
- -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Questo è l'aspetto del file di configurazione delle reti: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Ora possiamo eseguire uno dei seguenti comandi: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Ora si è pronti per `yarn deploy`. - -**Nota:** Come già menzionato, da `graph-cli 0.32.0` è possibile eseguire direttamente `yarn deploy` con l'opzione `--network`: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Utilizzo del template subgraph.yaml - -Una soluzione per le vecchie versioni di graph-cli che permettono di parametrizzare aspetti come gli indirizzi dei contratti è quella di generare parti di esso utilizzando un sistema di template come [Mustache](https://mustache.github.io/) oppure [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. 
You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -e - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Inoltre, si dovrebbero sostituire il nome della rete e gli indirizzi nel manifest con i segnaposto variabili `{{network}}` e `{{address}}` e rinominare il manifest, ad esempio, in `subgraph.template.yaml`^: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Per generare un manifest per entrambe le reti, si possono aggiungere due comandi supplementari a `package.json` insieme a una dipendenza da `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Un esempio funzionante di questo può essere trovato [qui](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Nota:** Questo approccio può essere applicato anche a situazioni più complesse, in cui è necessario sostituire più indirizzi e nomi di rete del contratto o in cui si generano mappature o ABI anche da modelli. - -## Verifica dello stato di salute del subgraph - -Se un subgraph si sincronizza con successo, è un buon segno che continuerà a funzionare bene per sempre. 
Tuttavia, nuovi trigger sulla rete potrebbero far sì che il subgraph si trovi in una condizione di errore non testata o che inizi a rimanere indietro a causa di problemi di prestazioni o di problemi con gli operatori dei nodi. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -In questo modo si ottiene il `chainHeadBlock` che può essere confrontato con il `latestBlock` del subgraph per verificare se è in ritardo. `synced` informa se il subgraph ha mai raggiunto la chain. `health` può attualmente assumere i valori di `healthy` se non si sono verificati errori, o `failed` se si è verificato un errore che ha bloccato l'avanzamento del subgraph. In questo caso, è possibile controllare il campo `fatalError` per i dettagli dell'errore. - -## Politica di archiviazione dei subgraph dei hosted service - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. 
- -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Politica di archiviazione dei subgraph di Subgraph Studio - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Ogni subgraph colpito da questa politica ha un'opzione per recuperare la versione in questione. diff --git a/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index c5b42ea5c62d..000000000000 --- a/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Distribuzione di un subgraph nel Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Installare il Graph CLI (con yarn oppure npm) -- Creare il subgraph nel Subgraph Studio -- Autenticare il proprio account dalla CLI -- Distribuzione di un subgraph nel Subgraph Studio - -## Installazione di Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. 
- -**Installare con yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installare con npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Creare il subgraph nel Subgraph Studio - -Prima di distribuire il subgraph attuale, è necessario creare un subgraph nel [Subgraph Studio](https://thegraph.com/studio/). Si consiglia di leggere [ la documentazione di Studio](/deploying/subgraph-studio) per saperne di più. - -## Inizializzare il subgraph - -Una volta creato il subgraph nel Subgraph Studio, è possibile inizializzare il codice del subgraph utilizzando questo comando: - -```bash -graph init --studio -``` - -Il valore `` si trova nella pagina dei dettagli del subgraph nel Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -Dopo aver eseguito `graph init`, verrà chiesto di inserire l'indirizzo del contratto, la rete e l'ABI che si desidera interrogare. In questo modo si genera una nuova cartella sulla macchina locale con del codice di base per iniziare a lavorare sul subgraph. È quindi possibile finalizzare il subgraph per assicurarsi che funzioni come previsto. - -## Graph Auth - -Prima di poter distribuire il proprio subgraph nel Subgraph Studio, è necessario accedere al proprio account nella CLI. Per fare ciò, è necessaria la chiave di distribuzione che si trova nella pagina "My Subgraphs" o nella pagina dei dettagli del subgraph. - -Ecco il comando da usare per autenticarsi dalla CLI: - -```bash -graph auth --studio -``` - -## Distribuzione di un subgraph nel Subgraph Studio - -Una volta pronti, è possibile distribuire il subgraph nel Subgraph Studio. Questa operazione non pubblicherà il subgraph sulla rete decentralizzata, ma lo distribuirà solo sul vostro account di Studio, dove potrete testarlo e aggiornare i metadati. - -Ecco il comando CLI da utilizzare per distribuire il subgraph. 
- -```bash -graph deploy --studio -``` - -Dopo aver eseguito questo comando, la CLI chiederà un'etichetta di versione, che può essere nominata come si vuole, usando etichette come `0.1` e `0.2` oppure usando anche lettere come `uniswap-v2-0.1`. Queste etichette saranno visibili in Graph Explorer e potranno essere usate dai Curator per decidere se segnalare o meno questa versione, quindi sceglieteli con saggezza. - -Una volta distribuito, è possibile testare il subgraph nel Subgraph Studio utilizzando il playground, distribuire un'altra versione se necessario, aggiornare i metadati e, quando si è pronti, pubblicare il subgraph su Graph Explorer. diff --git a/website/pages/it/deploying/hosted-service.mdx b/website/pages/it/deploying/hosted-service.mdx deleted file mode 100644 index 8124661a2c7c..000000000000 --- a/website/pages/it/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Che cos'è il Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -Questa sezione illustra come distribuire un subgraph nel [hosted service](https://thegraph.com/hosted-service/). - -Se non si dispone di un account sul hosted service, è possibile registrarsi con il proprio account GitHub. Una volta effettuata l'autenticazione, si può iniziare a creare subgraph attraverso UI e a distribuirli dal proprio terminale. Il hosted service supporta una serie di reti, come Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum e altre ancora. - -Per un elenco completo, vedere [Reti supportate](/developing/supported-networks/#hosted-service). - -## Crea un Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. 
Create a subgraph by passing in `graph init --product hosted-service` - -### Da un contratto esistente - -Se si dispone già di uno smart contract distribuito sulla rete scelta, l'avvio di un nuovo subgraph da questo contratto può essere un buon modo per iniziare a utilizzare l'hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -Il `` in questo caso è il nome dell'utente GitHub oppure dell'organizzazione, `` è il nome del subgraph, e `` è il nome opzionale della directory in cui `graph init` metterà il manifest del subgraph di esempio. Il `` è l'indirizzo del contratto esistente. `` è il nome della rete su cui risiede il contratto. `` è un percorso locale a un file ABI del contratto. **Entrambi `--network` e `--abi` sono opzionali.** - -### Da un subgraph di esempio - -La seconda modalità supportata da `graph init` è la creazione di un nuovo progetto a partire da un subgraph di esempio. Il comando seguente esegue questa operazione: - -``` -graph init --from-example --product hosted-service / [] -``` - -Il subgraph di esempio si basa sul contratto Gravity di Dani Grant, che gestisce gli avatar degli utenti ed emette eventi `NewGravatar` oppure `UpdateGravatar` ogni volta che gli avatar vengono creati o aggiornati. Il subgraph gestisce questi eventi scrivendo entità `Gravatar` nel negozio Graph Node e assicurandosi che vengano aggiornate in base agli eventi. 
Continuate con il [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) per capire meglio a quali eventi dei vostri smart contract prestare attenzione, mappature e altro ancora. - -### Da un Contratto Proxy - -Per costruire un subgraph su misura per il monitoraggio di un contratto Proxy, inizializzare il subgraph specificando l'indirizzo del contratto di implementazione. Una volta concluso il processo di inizializzazione, l'ultimo passo consiste nell'aggiornare il nome della rete nel file subgraph.yaml con l'indirizzo del contratto Proxy. Si può usare il comando seguente. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Reti supportate sul hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/it/deploying/subgraph-studio.mdx b/website/pages/it/deploying/subgraph-studio.mdx deleted file mode 100644 index 3d961f735185..000000000000 --- a/website/pages/it/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Benvenuti nel vostro nuovo launchpad👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Creare un subgraph attraverso UI di Studio -- Distribuire un subgraph usando la CLI -- Pubblicare un subgraph con UI di Studio -- Testarlo nel playground -- Integrarlo in staging utilizzando la query URL -- Creare e gestire le chiavi API per specifici subgraph - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. 
- -Eseguire query dei subgraph genera tariffe di query, utilizzate per ricompensare gli [Indexer](/network/indexing) del Graph Network. Se siete sviluppatori di dapp o di subgraph, lo Studio vi consentirà di costruire subgraph migliori per alimentare le query della vostra comunità. Lo Studio è composto da 5 parti principali: - -- Il controllo dell'account utente -- Un elenco di subgraph creati dall'utente -- Una sezione per gestire, visualizzare i dettagli e lo stato di uno specifico subgraph -- Una sezione per gestire le chiavi API di cui si ha bisogno per eseguire query di un subgraph -- Una sezione per gestire la fatturazione - -## Come creare il proprio account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Una volta effettuato l'accesso, nella pagina iniziale del vostro account vedrete la vostra chiave di distribuzione unica. Questo vi permetterà di pubblicare i vostri subgraph o di gestire le chiavi API e la fatturazione. Avrete una chiave di distribuzione unica che può essere rigenerata se pensate che sia stata compromessa. - -## Come creare un subgraph nel Subgraph Studio - - - -## Compatibilità del subgraph con The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Indicizzare [una rete supportata](/developing/supported-networks) -- Non deve utilizzare nessuna delle seguenti funzioni: - - ipfs.cat e ipfs.map - - Errori non fatali - - Grafting - -Altre funzionalità e reti saranno aggiunte gradualmente a The Graph Network. - -### Flusso del ciclo di vita del subgraph - -![Ciclo di vita del subgraph](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. 
This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testare il subgraph nel Subgraph Studio - -Se si desidera testare il proprio subgraph prima di pubblicarlo nella rete, è possibile farlo nel Subgraph Playground o guardare i log. I log di Subgraph vi diranno **dove** il vostro subgraph fallisce nel caso in cui lo faccia. - -## Pubblicare il proprio subgraph nel Subgraph Studio - -Siete arrivati fin qui - complimenti! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Date un'occhiata anche al video qui sotto: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Gli Indexer devono inviare i record obbligatori di Proof of Indexing a partire da uno specifico hash del blocco. Poiché la pubblicazione di un subgraph è un'azione eseguita on-chain, ricordate che la transazione può richiedere fino a qualche minuto. L'indirizzo utilizzato per pubblicare il contratto sarà l'unico in grado di pubblicare versioni future. Scegliete con attenzione! - -I subgraph con segnale di curation vengono mostrati agli Indexer in modo che possano essere indicizzati sulla rete decentralizzata. 
È possibile pubblicare i subgraph e il segnale in un'unica transazione, il che consente di coniare il primo segnale di curation sul subgraph e di risparmiare sui costi del gas. Aggiungendo il proprio segnale a quelli forniti successivamente dai curatori, il subgraph avrà anche maggiori possibilità di servire le query. - -**Ora che il subgraph è stato pubblicato, vediamo come gestirlo regolarmente.** Si noti che non è possibile pubblicare il subgraph sulla rete se la sincronizzazione non è riuscita. Questo di solito è dovuto alla presenza di bug nel subgraph - i log vi diranno dove si trovano questi problemi! - -## Versionamento del subgraph con la CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Si noti che la pubblicazione di una nuova versione di un subgraph nella rete comporta dei costi. Oltre alle commissioni di transazione, gli sviluppatori devono anche finanziare una parte della tassa di curation sul segnale di auto-migrazione. 
Non è possibile pubblicare una nuova versione del proprio subgraph se i curator non l'hanno segnalata. Per ulteriori informazioni sui rischi della curation, si legga [qui](/network/curating). - -### Archiviazione automatica delle versioni del subgraph - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Disarchiviazione](/img/Unarchive.png) diff --git a/website/pages/it/developing/creating-a-subgraph.mdx b/website/pages/it/developing/creating-a-subgraph.mdx deleted file mode 100644 index b36a9a56449a..000000000000 --- a/website/pages/it/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creare un subgraph ---- - -Un subgraph estrae i dati da una blockchain, li elabora e li memorizza in modo che possano essere facilmente interrogati tramite GraphQL. - -![Definizione di un Subgraph](/img/defining-a-subgraph.png) - -La definizione del subgraph consiste in alcuni file: - -- `subgraph.yaml`: un file YAML contenente il manifesto del subgraph - -- `schema.graphql`: uno schema GraphQL che definisce quali dati sono memorizzati per il subgraph e come interrogarli via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) codice che traduce i dati dell'evento nelle entità definite nello schema (ad esempio `mapping.ts` in questo tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). 
It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Installare the Graph CLI - -The Graph CLI è scritta in JavaScript e per utilizzarla è necessario installare `yarn` oppure `npm`; in quanto segue si presume che si disponga di yarn. - -Una volta che si dispone di `yarn`, installare the Graph CLI eseguendo - -**Installare con yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installare con npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## Da un contratto esistente - -Il comando seguente crea un subgraph che indicizza tutti gli eventi di un contratto esistente. Tenta di recuperare l'ABI del contratto da Etherscan e torna a richiedere il percorso di un file locale. Se manca uno qualsiasi degli argomenti opzionali, il comando viene eseguito attraverso un modulo interattivo. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -Il `` è l'ID del subgraph in Subgraph Studio, che si trova nella pagina dei dettagli del subgraph. - -## Da un subgraph di esempio - -La seconda modalità supportata da `graph init` è la creazione di un nuovo progetto a partire da un subgraph di esempio. 
Il comando seguente esegue questa operazione: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Aggiungere nuove data sources a un subgraph esistente - -Dalla `v0.31.0` il `graph-cli` supporta l'aggiunta di nuove sorgenti di dati a un subgraph esistente tramite il comando `graph add`. - -```sh -graph add
[] - -Opzioni: - - --abi Percorso dell'ABI del contratto (predefinito: download from Etherscan) - --contract-name Nome del contratto (predefinito: Contract) - --merge-entities Se unire entità con lo stesso nome (predefinito: false) - --network-file Percorso del file di configurazione della rete (predefinito: "./networks.json") -``` - -Il comando `add` recupera l'ABI da Etherscan (a meno che non sia specificato un percorso ABI con l'opzione `--abi`) e crea una nuova `dataSource` nello stesso modo in cui il comando `graph init` crea una `dataSource` `-from-contract`, aggiornando di conseguenza lo schema e le mappature. - -L'opzione `--merge-entities` identifica il modo in cui lo sviluppatore desidera gestire i conflitti tra i nomi di `entità` e `evento`: - -- If `true`: il nuovo `dataSource` dovrebbe utilizzare gli `eventHandler` & `entità` esistenti. -- If `false`: una nuova entità & il gestore dell'evento deve essere creato con `${dataSourceName}{EventName}`. - -Il contratto `address` sarà scritto in `networks.json` per la rete rilevante. - -> **Nota:** Quando si utilizza il cli interattivo, dopo aver eseguito con successo `graph init`, verrà richiesto di aggiungere un nuovo `dataSource`. - -## Manifesto di Subgraph - -Il manifesto del subgraph `subgraph.yaml` definisce gli smart contract che il subgraph indicizza, a quali eventi di questi contratti prestare attenzione e come mappare i dati degli eventi alle entità che Graph Node memorizza e permette di effettuare query. Le specifiche complete dei manifesti dei subgraph sono disponibili [qui](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Per il subgraph di esempio, `subgraph.yaml` è: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Le voci importanti da aggiornare per il manifesto sono: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: un elenco di tutti i nomi delle [caratteristiche](#experimental-features) utilizzati. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: l'indirizzo dello smart contract di cui il subgraph è fonte e l'ABI dello smart contract da utilizzare. L'indirizzo è facoltativo; omettendolo, si possono indicizzare gli eventi corrispondenti di tutti i contratti. - -- `dataSources.source.startBlock`: il numero opzionale del blocco da cui l'origine dati inizia l'indicizzazione. Nella maggior parte dei casi, si consiglia di utilizzare il blocco in cui è stato creato il contratto. - -- `dataSources.source.endBlock`: Il numero opzionale del blocco a cui il data source interrompe l'indicizzazione, incluso quel blocco. È richiesta la versione minima delle specifiche: `0.0.9`. - -- `dataSources.context`: coppie del valore chiave che possono essere usate nelle mappature dei subgraph. Supporta vari tipi di dati come `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, e `BigInt`. Ogni variabile deve specificare il suo `tipo` e `dati`. Queste variabili di contesto sono poi accessibili nei file di mappatura, offrendo più opzioni configurabili per lo sviluppo del subgraph. - -- `dataSources.mapping.entities`: le entità che l'origine dati scrive nell'archivio. Lo schema di ciascuna entità è definito nel file schema.graphql. - -- `dataSources.mapping.abis`: uno o più file ABI denominati per il contratto sorgente e per tutti gli altri smart contract con cui si interagisce all'interno delle mappature. - -- `dataSources.mapping.eventHandlers`: elenca gli eventi dello smart contract a cui questo subgraph reagisce e i gestori nella mappatura -./src/mapping.ts nell'esempio - che trasformano questi eventi in entità dell'archivio. - -- `dataSources.mapping.callHandlers`: elenca le funzioni dello smart contract a cui questo subgraph reagisce e i gestori della mappatura che trasformano gli input e gli output delle chiamate di funzione in entità dell'archivio. 
- -- `dataSources.mapping.blockHandlers`: elenca i blocchi a cui questo subgraph reagisce e i gestori nella mappatura da eseguire quando un blocco viene aggiunto alla chain. Senza un filtro, il gestore del blocco verrà eseguito ogni blocco. È possibile fornire un filtro di chiamata opzionale aggiungendo al gestore un campo `filter` con `kind: call`. Questo eseguirà il gestore solo se il blocco contiene almeno una chiamata al contratto sorgente. - -Un singolo subgraph può indicizzare i dati di più smart contract. Aggiungere all'array `dataSources` una voce per ogni contratto da cui devono essere indicizzati i dati. - -### Order of Triggering Handlers - -I trigger per una data source all'interno di un blocco sono ordinati secondo il seguente processo: - -1. I trigger di eventi e chiamate sono ordinati prima per indice di transazione all'interno del blocco. -2. I trigger di eventi e chiamate all'interno della stessa transazione sono ordinati secondo una convenzione: prima i trigger di eventi e poi quelli di chiamate, rispettando l'ordine in cui sono definiti nel manifesto. -3. I trigger di blocco vengono eseguiti dopo i trigger di evento e di chiamata, nell'ordine in cui sono definiti nel manifesto. - -Queste regole di ordinazione sono soggette a modifiche. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. 
- -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. 
Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. 
-- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. 
Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. 
- -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Ottenere gli ABI - -I file ABI devono corrispondere al vostro contratto. Esistono diversi modi per ottenere i file ABI: - -- Se state costruendo il vostro progetto, probabilmente avrete accesso alle ABI più recenti. -- Se state costruendo un subgraph per un progetto pubblico, potete scaricare il progetto sul vostro computer e ottenere l'ABI usando [`truffle compile`](https://truffleframework.com/docs/truffle/overview) o usando solc per compilare. -- È possibile trovare l'ABI anche su [Etherscan](https://etherscan.io/), ma non è sempre affidabile, in quanto l'ABI caricato su questo sito potrebbe non essere aggiornato. Assicuratevi di avere l'ABI corretto, altrimenti l'esecuzione del subgraph fallirà. - -## Schema GraphQL - -Lo schema del subgraph si trova nel file `schema.graphql`. 
Gli schemi GraphQL sono definiti utilizzando il linguaggio di definizione dell'interfaccia GraphQL. Se non hai mai scritto uno schema GraphQL, si consiglia di dare un'occhiata a questa guida sul sistema di tipi GraphQL. La documentazione di riferimento per gli schemi GraphQL si trova nella sezione [GraphQL API](/querying/graphql-api). - -## Definire le entità - -Prima di definire le entità, è importante fare un passo indietro e pensare a come i dati sono strutturati e collegati. Tutte le query saranno fatte sul modello di dati definito nello schema del subgraph e sulle entità indicizzate dal subgraph. Per questo motivo, è bene definire lo schema del subgraph in modo che corrisponda alle esigenze della propria applicazione. Può essere utile immaginare le entità come "oggetti contenenti dati", piuttosto che come eventi o funzioni. - -Con The Graph, è sufficiente definire i tipi di entità in `schema.graphql` e Graph Node genererà campi di primo livello per interrogare singole istanze e collezioni di quel tipo di entità. Ogni tipo che dovrebbe essere un'entità deve essere annotato con una direttiva `@entity`. Per impostazione predefinita, le entità sono mutabili, il che significa che le mappature possono caricare entità esistenti, modificarle e memorizzare una nuova versione di quell'entità. La mutabilità ha un prezzo e per i tipi di entità per i quali si sa che non saranno mai modificati, ad esempio perché contengono semplicemente dati estratti alla lettera dalla chain, si raccomanda di contrassegnarli come immutabili con `@entity(immutable: true)`. I mapping possono apportare modifiche alle entità immutabili, purché tali modifiche avvengano nello stesso blocco in cui l'entità è stata creata. Le entità immutabili sono molto più veloci da scrivere e da effettuare query e quindi dovrebbero essere utilizzate ogni volta che è possibile. 
- -### Buon esempio - -L'entità `Gravatar` qui sotto è strutturata intorno a un oggetto Gravatar ed è un buon esempio di come potrebbe essere definita un'entità. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Cattivo esempio - -Gli esempi di entità `GravatarAccepted` e `GravatarDeclined` che seguono sono basati su eventi. Non è consigliabile mappare gli eventi o le chiamate di funzione alle entità 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Campi opzionali e obbligatori - -I campi delle entità possono essere definiti come obbligatori o opzionali. I campi obbligatori sono indicati da `!` nello schema. Se un campo obbligatorio non è impostato nella mappatura, si riceverà questo errore quando si interroga il campo: - -``` -Null value resolved for non-null field 'name' -``` - -Ogni entità deve avere un campo `id`, che deve essere di tipo `Bytes!` oppure `String!`. In genere si raccomanda di usare `Bytes!`, a meno che il `id` non contenga testo leggibile dall'umano, poiché le entità con id `Bytes!` saranno più veloci da scrivere e da effettuare query rispetto a quelle con `String!` `id`. Il campo `id` serve come chiave primaria e deve essere unico per tutte le entità dello stesso tipo. Per ragioni storiche, è accettato anche il tipo `ID!`, sinonimo di `String!`. - -Per alcuni tipi di entità, l'`id` è costruito a partire dagli id di altre due entità; ciò è possibile usando `concat`, ad esempio, `let id = left.id.concat(right.id)` per formare l'id dagli id di `left` e `right`. Allo stesso modo, per costruire un id a partire dall'id di un'entità esistente e da un contatore `count`, si può usare `let id = left.id.concatI32(count)`. 
La concatenazione è garantita per produrre id unici, purché la lunghezza di `left` sia la stessa per tutte queste entità, ad esempio perché `left.id` è un `Adress`. - -### Tipi scalari integrati - -#### Scalari supportati da GraphQL - -Nella nostra API GraphQL supportiamo i seguenti scalari: - -| Tipo | Descrizione | -| --- | --- | -| `Bytes` | Byte array, rappresentato come una stringa esadecimale. Comunemente utilizzato per gli hash e gli indirizzi di Ethereum. | -| `String` | Scalare per valori `string`. I caratteri nulli non sono supportati e vengono rimossi automaticamente. | -| `Boolean` | Scalare per valori `boolean`. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | Un intero firmato a 8 byte, noto anche come intero firmato a 64 bit, può memorizzare valori nell'intervallo da -9,223,372,036,854,775,808 a 9,223,372,036,854,775,807. È preferibile utilizzare questo per rappresentare `i64` da ethereum. | -| `BigInt` | Numeri interi grandi. Utilizzati per i tipi `uint32`, `int64`, `uint64`, ..., `uint256` di Ethereum. Nota: Tutto ciò che è inferiore a `uint32` come `int32`, `uint24` oppure `int8` è rappresentato come `i32`. | -| `BigDecimal` | `BigDecimal` Decimali ad alta precisione rappresentati come un significante e un esponente. L'intervallo degli esponenti va da -6143 a +6144. Arrotondato a 34 cifre significative. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enum - -È possibile creare enum anche all'interno di uno schema. Gli enum hanno la seguente sintassi: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Una volta che l'enum è definito nello schema, si può usare la rappresentazione in stringa del valore dell'enum per impostare un campo enum su un'entità. 
Ad esempio, si può impostare il `tokenStatus` su `SecondOwner` definendo prima l'entità e poi impostando il campo con `entity.tokenStatus = "SecondOwner"`. L'esempio seguente mostra l'aspetto dell'entità Token con un campo enum: - -Maggiori dettagli sulla scrittura degli enum si trovano nella [documentazione di GraphQL](https://graphql.org/learn/schema/). - -#### Relazioni tra entità - -Un'entità può avere una relazione con una o più altre entità dello schema. Queste relazioni possono essere attraversate nelle query. Le relazioni in The Graph sono unidirezionali. È possibile simulare relazioni bidirezionali definendo una relazione unidirezionale su entrambe le "estremità" della relazione. - -Le relazioni sono definite sulle entità come qualsiasi altro campo, tranne per il fatto che il tipo specificato è quello di un'altra entità. - -#### Rapporti uno-a-uno - -Definire un tipo di entità `Transaction` con una relazione opzionale uno-a-uno con un tipo di entità `TransactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Relazioni uno-a-molti - -Definire un tipo di entità `TokenBalance` con una relazione obbligatoria uno-a-molti con un tipo di entità Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Ricerche inverse - -Le ricerche inverse possono essere definite su un'entità attraverso il campo `@derivedFrom`. Questo crea un campo virtuale sull'entità che può essere interrogato, ma non può essere impostato manualmente attraverso l'API dei mapping. Piuttosto, è derivato dalla relazione definita sull'altra entità. 
Per tali relazioni, raramente ha senso memorizzare entrambi i lati della relazione e sia l'indicizzazione che le prestazioni delle query saranno migliori quando solo un lato è memorizzato e l'altro è derivato. - -Per le relazioni uno-a-molti, la relazione deve sempre essere memorizzata sul lato "uno" e il lato "molti" deve sempre essere derivato. Memorizzare la relazione in questo modo, piuttosto che memorizzare un array di entità sul lato "molti", migliorerà notevolmente le prestazioni sia per l'indicizzazione che per l'interrogazione del subgraph. In generale, la memorizzazione di array di entità dovrebbe essere evitata per quanto possibile. - -#### Esempio - -Possiamo rendere accessibili i saldi di un token dal token stesso, derivando un campo `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Relazioni molti-a-molti - -Per le relazioni molti-a-molti, come ad esempio gli utenti che possono appartenere a un numero qualsiasi di organizzazioni, il modo più semplice, ma generalmente non il più performante, di modellare la relazione è come un array in ciascuna delle due entità coinvolte. Se la relazione è simmetrica, è necessario memorizzare solo un lato della relazione e l'altro lato può essere derivato. - -#### Esempio - -Definire una ricerca inversa da un tipo di entità `User` a un tipo di entità `Organization`. Nell'esempio seguente, questo si ottiene cercando l'attributo `members` dall'entità `Organization`. Nelle query, il campo `organizations` su `User` verrà risolto trovando tutte le entità `Organization` che includono l'ID dell'utente. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! 
@derivedFrom(field: "members") -} -``` - -Un modo più performante per memorizzare questa relazione è una tabella di mappatura che ha una voce per ogni coppia `User` / `Organization` con uno schema come - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -Questo approccio richiede che le query scendano di un ulteriore livello per recuperare, ad esempio, le organizzazioni degli utenti: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Questo modo più elaborato di memorizzare le relazioni molti-a-molti si traduce in una minore quantità di dati memorizzati per il subgraph e quindi in un subgraph che spesso è molto più veloce da indicizzare e da effettuare query. - -#### Aggiungere commenti allo schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Definizione dei campi di ricerca fulltext - -Le query di ricerca fulltext filtrano e classificano le entità in base a un input di ricerca testuale. Le query full-text sono in grado di restituire corrispondenze per parole simili, elaborando il testo della query in radici (stemming) prima di confrontarle con i dati di testo indicizzati. 
- -La definizione di una query fulltext include il nome della query, il dizionario linguistico utilizzato per elaborare i campi di testo, l'algoritmo di classificazione utilizzato per ordinare i risultati e i campi inclusi nella ricerca. Ogni query fulltext può comprendere più campi, ma tutti i campi inclusi devono appartenere a un unico tipo di entità. - -Per aggiungere una query fulltext, includere un tipo `_Schema_` con una direttiva fulltext nello schema GraphQL. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -Il campo di esempio `bandSearch` può essere utilizzato nelle query per filtrare le entità `Band` in base ai documenti di testo nei campi `name`, `description`, e `bio`. Passare a [GraphQL API - Queries](/querying/graphql-api#queries) per una descrizione dell'API di ricerca fulltext e per altri esempi di utilizzo. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** A partire dalla `specVersion` `0.0.4`, `fullTextSearch` deve essere dichiarato nella sezione `features` del manifesto del subgraph. - -### Lingue supportate - -La scelta di una lingua diversa avrà un effetto definitivo, anche se talvolta sottile, sull'API di ricerca fulltext. I campi coperti da una query fulltext vengono esaminati nel contesto della lingua scelta, quindi i lessemi prodotti dall'analisi e dalle query di ricerca variano da lingua a lingua. Ad esempio, quando si utilizza il dizionario turco supportato, "token" viene ridotto a "toke", mentre il dizionario inglese lo riduce a "token". 
- -Dizionari linguistici supportati: - -| Codice | Dizionario | -| -------- | ---------- | -| simple | Generale | -| da | Danese | -| nl | Olandese | -| en | Inglese | -| fi | Finlandese | -| fr | Francese | -| de | Tedesco | -| hu | Ungherese | -| it | Italiano | -| no | Norvegese | -| pt | Portoghese | -| ro | Rumeno | -| ru | Russo | -| es | Spagnolo | -| sv | Svedese | -| tr | Turco | - -### Algoritmi di classificazione - -Algoritmi supportati per ordinare i risultati: - -| Algoritmo | Descrizione | -| ------------- | --------------------------------------------------------------------------------------------- | -| rank | Utilizza la qualità della corrispondenza (0-1) della query fulltext per ordinare i risultati. | -| proximityRank | Simile a rank, ma include anche la vicinanza delle corrispondenze. | - -## Scrivere le mappature - -Le mappature prendono i dati da una particolare fonte e li trasformano in entità definite nello schema. Le mappature sono scritte in un sottoinsieme di [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) chiamato [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) che può essere compilato in WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript è più rigoroso del normale TypeScript, ma offre una sintassi familiare. - -Per ogni gestore di eventi definito in `subgraph.yaml` sotto `mapping.eventHandlers`, creare una funzione esportata con lo stesso nome. Ogni gestore deve accettare un singolo parametro, chiamato `event`, con un tipo corrispondente al nome dell'evento da gestire. 
- -Nel subgraph di esempio, `src/mapping.ts` contiene gestori per gli eventi `NewGravatar` e `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -Il primo gestore prende un evento `NewGravatar` e crea una nuova entità `Gravatar` con `new Gravatar(event.params.id)`, popolando i campi dell'entità usando i parametri corrispondenti dell'evento. Questa istanza di entità è rappresentata dalla variabile `gravatar`, con un valore id di `event.params.id`. - -Il secondo gestore cerca di caricare il `Gravatar` esistente dall'archivio di Graph Node. Se non esiste ancora, viene creato su richiesta. L'entità viene quindi aggiornata in base ai nuovi parametri dell'evento, prima di essere salvata nell'archivio con `gravatar.save()`. - -### ID consigliati per la creazione di nuove entità - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. 
- -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## Generazione del codice - -Per rendere semplice e sicuro il lavoro con gli smart contract, gli eventi e le entità, la Graph CLI può generare tipi AssemblyScript dallo schema GraphQL del subgraph e dagli ABI dei contratti inclusi nelle data source. 
- -Questo viene fatto con - -```sh -graph codegen [--output-dir ] [] -``` - -ma nella maggior parte dei casi, i subgraph sono già preconfigurati tramite `package.json` per consentire di eseguire semplicemente uno dei seguenti comandi per ottenere lo stesso risultato: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Questo genera una classe AssemblyScript per ogni smart contract nei file ABI menzionati in `subgraph.yaml`, consentendo di legare questi contratti a indirizzi specifici nelle mappature e di chiamare i metodi del contratto in sola lettura contro il blocco in elaborazione. Verrà inoltre generata una classe per ogni evento contrattuale, per fornire un facile accesso ai parametri dell'evento, nonché al blocco e alla transazione da cui l'evento ha avuto origine. Tutti questi tipi sono scritti in `//.ts`. Nel subgraph di esempio, questo sarebbe `generated/Gravity/Gravity.ts`, consentendo ai mapping di importare questi tipi con. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Inoltre, viene generata una classe per ogni tipo di entità nello schema GraphQL del subgraph. Queste classi forniscono il caricamento sicuro del tipo di entità, l'accesso in lettura e scrittura ai campi dell'entità e un metodo `save()` per scrivere le entità nella memoria. Tutte le classi di entità sono scritte in `/schema.ts`, consentendo alle mappature di importarle con - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Nota:** La generazione del codice deve essere eseguita nuovamente dopo ogni modifica allo schema GraphQL o alle ABI incluse nel manifesto. Inoltre, deve essere eseguita almeno una volta prima di costruire o distribuire il subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Modelli di Data Source - -Un modello comune negli smart contract compatibili con EVM è l'uso di contratti di registro o di fabbrica, in cui un contratto crea, gestisce o fa riferimento a un numero arbitrario di altri contratti che hanno ciascuno il proprio stato e i propri eventi. - -Gli indirizzi di questi subcontratti possono o meno essere noti in anticipo e molti di questi contratti possono essere creati e/o aggiunti nel tempo. Per questo motivo, in questi casi, la definizione di una singola data source o di un numero fisso di data source è impossibile e occorre un approccio più dinamico: i _modelli di data source_. - -### Data Source per il contratto principale - -Per prima cosa, si definisce un data source regolare per il contratto principale. Lo snippet seguente mostra un esempio semplificato di data source per il contratto [Uniswap](https://uniswap.org) exchange factory. Si noti il gestore di eventi `NewExchange(address,address)`. Questo viene emesso quando un nuovo contratto di scambio viene creato sulla chain dal contratto di fabbrica. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Modelli di data source per contratti creati dinamicamente - -Quindi, si aggiungono _modelli di data source_ al manifesto. Questi sono identici alle normali data source, tranne per il fatto che non hanno un indirizzo di contratto predefinito sotto `source`. 
In genere, si definisce un modello per ogni tipo di subcontratto gestito o referenziato dal contratto principale. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Istanziare un modello di data source - -Nella fase finale, si aggiorna la mappatura del contratto principale per creare un'istanza di origine dati dinamica da uno dei modelli. In questo esempio, si modificherà la mappatura del contratto principale per importare il modello `Exchange` e richiamare il metodo `Exchange.create(address)` per avviare l'indicizzazione del nuovo contratto exchange. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Nota:** Una nuova data source elaborerà solo le chiamate e gli eventi del blocco in cui è stata creata e di tutti i blocchi successivi, ma non elaborerà i dati storici, cioè quelli contenuti nei blocchi precedenti. 
-> -> Se i blocchi precedenti contengono dati rilevanti per la nuova data source, è meglio indicizzare tali dati leggendo lo stato attuale del contratto e creando entità che rappresentino tale stato al momento della creazione della nuova data source. - -### Contesto del Data Source - -I contesti delle data source consentono di passare una configurazione aggiuntiva quando si istanzia un modello. Nel nostro esempio, diciamo che gli scambi sono associati a una particolare coppia di trading, che è inclusa nell'evento `NewExchange`. Queste informazioni possono essere passate nell'origine dati istanziata, in questo modo: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -All'interno di una mappatura del modello `Exchange`, è possibile accedere al contesto: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Esistono setter e getter come `setString` e `getString` per tutti i tipi di valore. - -## Blocchi di partenza - -L'opzione `startBlock` è un'impostazione opzionale che consente di definire da quale blocco della chain l'origine dati inizierà l'indicizzazione. L'impostazione del blocco iniziale consente al data source di saltare potenzialmente milioni di blocchi irrilevanti. In genere, lo sviluppatore di un subgraph imposta `startBlock` sul blocco in cui è stato creato lo smart contract del data source. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Nota:** Il blocco di creazione del contratto può essere rapidamente consultato su Etherscan: -> -> 1. Cercare il contratto inserendo l'indirizzo nella barra di ricerca. -> 2. Fare clic sull'hash della transazione di creazione nella sezione `Contract Creator`. -> 3. Caricare la pagina dei dettagli della transazione, dove si trova il blocco iniziale per quel contratto. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
- -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. 
- -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Gestori di chiamate - -Sebbene gli eventi rappresentino un modo efficace per raccogliere le modifiche rilevanti allo stato di un contratto, molti contratti evitano di generare log per ottimizzare i costi del gas. In questi casi, un subgraph può sottoscrivere le chiamate fatte al contratto dell'origine dati. Ciò si ottiene definendo gestori di chiamate che fanno riferimento alla firma della funzione e al gestore di mappatura che elaborerà le chiamate a questa funzione. Per elaborare queste chiamate, il gestore della mappatura riceverà un `ethereum.Call` come argomento con gli input e gli output digitati della chiamata. 
Le chiamate effettuate a qualsiasi profondità nella chain di chiamate di una transazione attiveranno la mappatura, consentendo di catturare l'attività con il contratto della data source attraverso i contratti proxy. - -I gestori di chiamate si attivano solo in uno dei due casi: quando la funzione specificata viene chiamata da un conto diverso dal contratto stesso o quando è contrassegnata come esterna in Solidity e chiamata come parte di un'altra funzione nello stesso contratto. - -> **Nota:** I gestori delle chiamate dipendono attualmente dall'API di tracciamento Parity. Alcune reti, come la chain BNB e Arbitrum, non supportano questa API. Se un subgraph che indicizza una di queste reti contiene uno o più gestori di chiamate, non inizierà la sincronizzazione. Gli sviluppatori di subgraph dovrebbero invece utilizzare i gestori di eventi. Questi sono molto più performanti dei gestori di chiamate e sono supportati da tutte le reti evm. - -### Definire un gestore di chiamate - -Per definire un gestore di chiamate nel manifesto, basta aggiungere un array `callHandlers` sotto la data source che si desidera sottoscrivere. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/calls - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mapping.ts - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -La `function` è la firma della funzione normalizzata per filtrare le chiamate. La proprietà `handler` è il nome della funzione della mappatura che si desidera eseguire quando la funzione di destinazione viene chiamata nel contratto del data source. - -### Funzione di mappatura - -Ogni gestore di chiamate accetta un singolo parametro di tipo corrispondente al nome della funzione chiamata. 
Nel subgraph di esempio sopra, la mappatura contiene un gestore per quando la funzione `createGravatar` viene chiamata e riceve un parametro `CreateGravatarCall` come argomento: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -La funzione `handleCreateGravatar` prende una nuova `CreateGravatarCall` che è una subclasse di `ethereum.Call`, fornita da `@graphprotocol/graph-ts`, che include gli input e gli output tipizzati della chiamata. Il tipo `CreateGravatarCall` viene generato quando si esegue `graph codegen`. - -## Gestori di blocchi - -Oltre a sottoscrivere eventi di contratto o chiamate di funzione, un subgraph può voler aggiornare i propri dati quando nuovi blocchi vengono aggiunti alla chain. A tale scopo, un subgraph può eseguire una funzione dopo ogni blocco o dopo i blocchi che corrispondono a un filtro predefinito. - -### Filtri supportati - -#### Filtro di chiamata - -```yaml -filter: - kind: call -``` - -_Il gestore definito sarà richiamato una volta per ogni blocco che contiene una chiamata al contratto (data source) sotto il quale il gestore è definito._ - -> **Nota:** Il filtro `call` dipende attualmente dall'API di tracciamento Parity. Alcune reti, come la chain BNB e Arbitrum, non supportano questa API. Se un subgraph che indicizza una di queste reti contiene uno o più gestori di blocchi con un filtro `call`, non inizierà la sincronizzazione. - -L'assenza di un filtro per un gestore di blocchi garantisce che il gestore venga chiamato a ogni blocco. Una data source può contenere un solo gestore di blocchi per ogni tipo di filtro. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Filtro di polling - -> **Richiede `specVersion` >= 0.0.8** - -> **Nota:** I filtri di polling sono disponibili solo su dataSources di `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Il gestore definito sarà chiamato una volta ogni `n` blocchi, dove `n` è il valore fornito nel campo `every`. Questa configurazione consente al subgraph di eseguire operazioni specifiche a intervalli regolari di blocco. - -#### Filtro once - -> **Richiede `specVersion` >= 0.0.8** - -> **Nota:** I filtri once sono disponibili solo su dataSources di `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Il gestore definito con il filtro once sarà chiamato una sola volta prima dell'esecuzione di tutti gli altri gestori. Questa configurazione consente al subgraph di utilizzare il gestore come gestore di inizializzazione, eseguendo compiti specifici all'inizio dell'indicizzazione. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Funzione di mappatura - -La funzione di mappatura riceverà un `ethereum.Block` come unico argomento. Come le funzioni di mappatura per gli eventi, questa funzione può accedere alle entità del subgraph esistenti nell'archivio, chiamare smart contract e creare o aggiornare entità. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Eventi anonimi - -Se è necessario elaborare eventi anonimi in Solidity, è possibile farlo fornendo l'argomento 0 dell'evento, come nell'esempio: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Un evento viene attivato solo se la firma e l'argomento 0 corrispondono. Per impostazione predefinita, `topic0` è uguale all'hash della firma dell'evento. - -## Ricevute di transazione nei gestori di eventi - -A partire da `specVersion` `0.0.5` e `apiVersion` `0.0.7`, i gestori di eventi possono avere accesso alla ricevuta della transazione che li ha emessi. - -Per fare ciò, i gestori di eventi devono essere dichiarati nel manifesto del subgraph con la nuova chiave `receipt: true`, che è opzionale e ha come valore predefinito false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -All'interno della funzione handler, è possibile accedere alla ricevuta nel campo `Event.receipt`. Se la chiave `receipt` è impostata su `false` oppure omessa nel manifesto, verrà restituito un valore `null`. 
- -## Caratteristiche sperimentali - -A partire da `specVersion` `0.0.4`, le caratteristiche del subgraph devono essere dichiarate esplicitamente nella sezione `features` al livello superiore del file del manifesto, utilizzando il loro nome `camelCase`, come elencato nella tabella seguente: - -| Caratteristica | Nome | -| ----------------------------------------------------- | ---------------- | -| [Errori non fatali](#non-fatal-errors) | `nonFatalErrors` | -| [Ricerca full-text](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -Ad esempio, se un subgraph utilizza le funzionalità **Full-Text Search** e **Non-fatal Errors**, il campo `features` del manifesto dovrebbe essere: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Si noti che l'uso di una caratteristica senza dichiararla incorrerà in un **errore di validazione** durante la distribuzione del subgraph, mentre non si verificherà alcun errore se una caratteristica viene dichiarata ma non utilizzata. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! 
@aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Errori non fatali - -Gli errori di indicizzazione su subgraph già sincronizzati causano, per impostazione predefinita, il fallimento del subgraph e l'interruzione della sincronizzazione. 
In alternativa, i subgraph possono essere configurati per continuare la sincronizzazione in presenza di errori, ignorando le modifiche apportate dal gestore che ha provocato l'errore. In questo modo gli autori dei subgraph hanno il tempo di correggere i loro subgraph mentre le query continuano a essere servite rispetto al blocco più recente, anche se i risultati potrebbero essere incoerenti a causa del bug che ha causato l'errore. Si noti che alcuni errori sono sempre fatali. Per essere non fatale, l'errore deve essere noto come deterministico. - -> **Nota:** Il Graph Network non supporta ancora gli errori non fatali e gli sviluppatori non dovrebbero distribuire i subgraph che utilizzano questa funzionalità alla rete tramite Studio. - -Per abilitare gli errori non fatali è necessario impostare il seguente flag di caratteristica nel manifesto del subgraph: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -La query deve anche scegliere di effettuare query dei dati con potenziali incongruenze attraverso l'argomento `subgraphError`. Si raccomanda anche di effettuare query di `_meta` per verificare se il subgraph ha saltato gli errori, come nell'esempio: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Se il subgraph incontra un errore, la query restituirà sia i dati sia un errore graphql con il messaggio `"indexing_error"`, come in questo esempio di risposta: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting su subgraph esistenti - -> **Nota:** non è consigliabile utilizzare il grafting quando si effettua l'aggiornamento iniziale a The Graph Network. Per saperne di più, leggi [qui](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -Quando un subgraph viene distribuito per la prima volta, inizia l'indicizzazione degli eventi al blocco genesi della chain corrispondente (o al blocco `startBlock` definito con ciascuna data source). In alcune circostanze, è vantaggioso riutilizzare i dati di un subgraph esistente e iniziare l'indicizzazione in un blocco successivo. Questa modalità di indicizzazione è chiamata _Grafting_. Il grafting è utile, ad esempio, durante lo sviluppo per superare rapidamente semplici errori nelle mappature o per far funzionare di nuovo temporaneamente un subgraph esistente dopo che è fallito. - -Un subgraph viene innestato su un subgraph di base quando il manifesto del subgraph in `subgraph.yaml` contiene un blocco `graft` al livello superiore: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Quando viene distribuito un subgraph il cui manifesto contiene un blocco di `graft`, Graph Node copierà i dati del subgraph di `base` fino al `blocco` indicato e compreso e continuerà a indicizzare il nuovo subgraph da quel blocco in poi. Il subgraph di base deve esistere sull'istanza del Graph Node di destinazione e deve essere indicizzato almeno fino al blocco dato. A causa di questa restrizione, il grafting dovrebbe essere usato solo durante lo sviluppo o in caso di emergenza per accelerare la produzione di un subgraph equivalente non grafted. - -Poiché l'innesto copia piuttosto che indicizzare i dati di base, è molto più veloce portare il subgraph al blocco desiderato rispetto all'indicizzazione da zero, anche se la copia iniziale dei dati può richiedere diverse ore per subgraph molto grandi. Mentre il subgraph innestato viene inizializzato, il Graph Node registra le informazioni sui tipi di entità già copiati. - -Il grafted subgraph può utilizzare uno schema GraphQL non identico a quello del subgraph di base, ma semplicemente compatibile con esso. 
Deve essere uno schema di subgraph valido di per sé, ma può discostarsi dallo schema del subgraph di base nei seguenti modi: - -- Aggiunge o rimuove i tipi di entità -- Rimuove gli attributi dai tipi di entità -- Aggiunge attributi annullabili ai tipi di entità -- Trasforma gli attributi non nulli in attributi nulli -- Aggiunge valori agli enum -- Aggiunge o rimuove le interfacce -- Cambia per quali tipi di entità viene implementata un'interfaccia - -> **[Gestione delle caratteristiche](#experimental-features):** `grafting` deve essere dichiarato tra le `caratteristiche` nel manifesto del subgraph. - -## IPFS/Arweave File Data Sources - -I data source file sono una nuova funzionalità del subgraph per accedere ai dati fuori chain durante l'indicizzazione in modo robusto ed estendibile. I data source file supportano il recupero di file da IPFS e da Arweave. - -> Questo pone anche le basi per l'indicizzazione deterministica dei dati fuori chain e per la potenziale introduzione di dati arbitrari provenienti da HTTP. - -### Panoramica - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> Questo sostituisce l'API esistente `ipfs.cat` - -### Guida all'aggiornamento - -#### Aggiornare `graph-ts` e `graph-cli` - -Le data source file richiedono l'uso di graph-ts >=0.29.0 e graph-cli >=0.33.1 - -#### Aggiungere un nuovo tipo di entità che verrà aggiornato quando verranno trovati dei file - -I data sources file non possono accedere o aggiornare le entità basate sulla chain, ma devono aggiornare le entità specifiche del file. 
- -Ciò può significare dividere i campi delle entità esistenti in entità separate, collegate tra loro. - -Entità combinata originale: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Entità nuova, divisa: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Se la relazione è 1:1 tra l'entità genitore e l'entità data source file risultante, il modello più semplice è quello di collegare l'entità genitore a un'entità file risultante utilizzando il CID IPFS come lookup. Contattateci su Discord se avete difficoltà a modellare le vostre nuove entità basate su file! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Aggiungere una nuova data source templata con `kind: file/ipfs` oppure `kind: file/arweave` - -È l'origine dati che verrà generata quando viene identificato un file di interesse. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Attualmente sono richiesti `abis`, anche se non è possibile richiamare i contratti dall'interno del data source file - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### Creare un nuovo gestore per elaborare i file - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -Il CID del file, come stringa leggibile, è accessibile tramite `dataSource` come segue: - -```typescript -const cid = dataSource.stringParam() -``` - -Esempio di gestore: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Creare i data source file quando necessario - -È ora possibile creare i data sources file durante l'esecuzione di gestori a chain: - -- Importare il modello dal `templates` generato automaticamente -- chiamare `TemplateName.create(cid: string)` da una mappatura, dove il cid è un identificatore di contenuto valido per IPFS o Arweave - -Per IPFS, Graph Node supporta [identificatori di contenuto v0 e v1](https://docs.ipfs.tech/concepts/content-addressing/) e identificatori di contenuto con directory (ad esempio `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
- -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Esempio: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Questo creerà una nuova data source file, che interrogherà l'endpoint IPFS o Arweave configurato del Graph Node, ritentando se non viene trovato. Quando il file viene trovato, viene eseguito il gestore del data source file. - -Questo esempio utilizza il CID come ricerca tra l'entità genitore `Token` e l'entità risultante `TokenMetadata`. 
- -> In precedenza, questo è il punto in cui uno sviluppatore di subgraph avrebbe chiamato `ipfs.cat(CID)` per recuperare il file - -Congratulazioni, state usando i data source file! - -#### Distribuire i subgraph - -È ora possibile `build` e `deploy` il proprio subgraph su qualsiasi Graph Node >=v0.30.0-rc.0. - -#### Limitazioni - -I gestori e le entità di data source file sono isolati dalle altre entità del subgraph, assicurando che siano deterministici quando vengono eseguiti e garantendo che non ci sia contaminazione di data source basate sulla chain. Per essere precisi: - -- Le entità create di Data Source file sono immutabili e non possono essere aggiornate -- I gestori di Data Source file non possono accedere alle entità di altre data source file -- Le entità associate al Data Source file non sono accessibili ai gestori alla chain - -> Sebbene questo vincolo non dovrebbe essere problematico per la maggior parte dei casi d'uso, potrebbe introdurre complessità per alcuni. Contattate via Discord se avete problemi a modellare i vostri dati basati su file in un subgraph! - -Inoltre, non è possibile creare data source da una data source file, sia essa una data source onchain o un'altra data source file. Questa restrizione potrebbe essere eliminata in futuro. - -#### Migliori pratiche - -Se si collegano i metadati NFT ai token corrispondenti, utilizzare l'hash IPFS dei metadati per fare riferimento a un'entità Metadata dall'entità Token. Salvare l'entità Metadata usando l'hash IPFS come ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -Se si dispone di entità che vengono aggiornate più volte, creare entità univoche basate su file utilizzando l'hash IPFS & l'ID dell'entità e fare riferimento a queste entità utilizzando un campo derivato nell'entità basata sulla chain. 
- -> Stiamo lavorando per migliorare la raccomandazione di cui sopra, in modo che le query restituiscano solo la versione "più recente" - -#### Problemi conosciuti - -I data source dei file attualmente richiedono le ABI, anche se le ABI non sono utilizzate ([problema](https://github.com/graphprotocol/graph-cli/issues/961)). La soluzione è aggiungere qualsiasi ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Esempi - -[Migrazione del Subgraph di Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Riferimenti - -[Data Sources del file GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/it/developing/creating-a-subgraph/_meta.js b/website/pages/it/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/it/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/it/developing/graph-ts/_meta.js b/website/pages/it/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/it/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/it/managing/deprecate-a-subgraph.mdx b/website/pages/it/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/it/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot 
delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/it/mips-faqs.mdx b/website/pages/it/mips-faqs.mdx deleted file mode 100644 index 69bc785ee5ef..000000000000 --- a/website/pages/it/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduzione - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
- -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. - -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? 
- -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? 
- -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? - -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. 
For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. 
diff --git a/website/pages/it/querying/_meta.js b/website/pages/it/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/it/querying/_meta.js +++ b/website/pages/it/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/it/querying/graph-client/_meta.js b/website/pages/it/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/it/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ja/_meta.js b/website/pages/ja/_meta.js index 10816c40b54d..f2f3b56163a5 100644 --- a/website/pages/ja/_meta.js +++ b/website/pages/ja/_meta.js @@ -1,21 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - network: 'グラフネットワーク', - '###1': { - type: 'heading', - title: 'サブグラフ', - }, - developing: '現像', - deploying: '展開する', - publishing: 'パブリッシング', - managing: '管理します', - querying: 'クエリ', - cookbook: 'クックブック', - 'release-notes': 'リリースノート&アップグレードガイド', - '###3': { - type: 'heading', - title: 'インデキシング', - }, + ...meta, } diff --git a/website/pages/ja/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ja/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 1394f8047abe..000000000000 --- a/website/pages/ja/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: サブグラフをホスティングサービスにデプロイする ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). 
If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## アクセストークンの保存 - -アカウントの作成後、[dashboard](https://thegraph.com/hosted-service/dashboard)に移動します。ダッシュボードに表示されているアクセストークンをコピーし、`graph auth --product hosted-service `を実行します。これでアクセストークンがあなたのコンピュータに保存されます。この作業は 1 回だけ行う必要がありますが、アクセストークンを再生成する場合も同様です。 - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - サブグラフのプレビュー画像やサムネイルとして使用する画像を選択します。 - -**Subgraph Name** - サブグラフが作成されるアカウント名と一緒に、デプロイメントや GraphQL エンドポイントで使用される`account-name/subgraph-name`のスタイル名も定義します。*このフィールドは後で変更できません*このフィールドは後で変更できません。 - -**Account** - サブグラフが作成されるアカウントです。これは個人または組織のアカウントになります。_サブグラフは後でアカウント間で移動できません_。 - -**Subtitle** - サブグラフカードに表示されるテキストです。 - -**Description** - サブグラフの説明、サブグラフの詳細ページで表示されます。 - -**GitHub URL** - GitHub 上のサブグラフのリポジトリへのリンクです。 - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). 
- -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -サブグラフのデプロイは、`yarn deploy`を実行して行います。 - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -グラフ ノードが履歴ブロックからすべてのデータを抽出すると、サブグラフのステータスは `Synced` に切り替わります。これらのブロックがマイニングされると、グラフ ノードはサブグラフのブロックを検査し続けます。 - -## サブグラフの再デプロイ - -エンティティ・マッピングの問題を修正するなど、サブグラフの定義に変更を加える場合は、上記の`yarn deploy` コマンドを再度実行して、サブグラフの更新版をデプロイします。サブグラフを更新する際には、グラフ・ノードがサブグラフ全体のインデックスを再作成する必要があります。 - -以前にデプロイされたサブグラフがまだ`Syncing`の状態にある場合は、新しくデプロイされたバージョンにすぐに置き換えられます。以前にデプロイされたサブグラフがすでに完全に同期されている場合、グラフノードは新しくデプロイされたバージョンを「`Pending Version`」としてマークし、バックグラウンドで同期を行い、新しいバージョンの同期が終了してから、現在デプロイされているバージョンを新しいバージョンに置き換えます。これにより、新しいバージョンが同期している間も、サブグラフを使って作業することができます。 - -## サブグラフを複数のネットワークにデプロイする - -場合によっては、すべてのコードを複製せずに、同じサブグラフを複数のネットワークに展開する必要があります。これに伴う主な課題は、これらのネットワークのコントラクト アドレスが異なることです。 - -### graph-cliを使用する - -`graph build` (`v0.29.0` から) と `graph deploy` (`v0.32.0` から) は共に、二つの新しいオプションを受け入れるようになりました。 - -```sh -Options: - - ... 
- --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -`--network` オプションで、`json` 標準ファイル(デフォルトは `networks.json` )からネットワーク設定を指定して、開発中にサブグラフを簡単に更新することが可能です。 - -**Note:** `init` コマンドは提供された情報に基づいて `networks.json` を自動生成するようになりました。これにより、既存のネットワークを更新したり、追加したりすることができます。 - -`networks.json`ファイルがない場合は、以下の構造で手動で作成する必要があります: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** 設定ファイルでは`templates`(もしあれば)を指定する必要はなく、`dataSources`だけを指定すればよいでしょう。もし、`subgraph.yaml`ファイルで宣言された`templates`があれば、それらのネットワークは自動的に`--network`オプションで指定したものに更新されます。 - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -ネットワーク設定ファイルはこのようになっているはずです: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -これで、次のいずれかのコマンドを実行できるようになりました: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -これで`yarn deploy`の準備が整いました。 - -**Note:** 前述のように、`graph-cli 0.32.0` からは `--network` オプションで直接 `yarn deploy` を実行できるようになりました: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### subgraph.yamlテンプレートの使用 - -古いバージョンのgraph-cliで、コントラクトアドレスのような側面をパラメータ化できるようにする一つの解決策は、[Mustache](https://mustache.github.io/) や [Handlebars](https://handlebarsjs.com/) などのテンプレートシステムを使ってその一部を生成することです。 - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and/と - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -また、マニフェストのネットワーク名とアドレスを変数プレースホルダー`{{network}}`と`{{address}}`で置き換え、マニフェストの名前を例えば`subgraph.template.yaml`に変更します。 - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -どちらのネットワークにも対応したマニフェストを生成するには、`package.json`に`mustache`への依存関係とともに 2 つのコマンドを追加します: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -この例は、[こちら](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759)でご覧いただけます。 - -**注:** このアプローチは、より複雑な状況にも適用できます。たとえば、コントラクト アドレスやネットワーク名以外のものを置き換える必要がある場合や、テンプレートからマッピングや ABI を生成する場合にも適用できます。 - -## サブグラフのヘルスチェック - -サブグラフが正常に同期された場合、それはそれが永久に正常に動作し続けることを示す良い兆候です。ただし、ネットワーク上の新しいトリガーにより、サブグラフがテストされていないエラー状態に陥ったり、パフォーマンスの問題やノード オペレーターの問題により遅れが生じたりする可能性があります。 - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -これにより、`chainHeadBlock`が得られ、サブグラフの`latestBlock`と比較して、遅れているかどうかを確認できます。 `synced`は、サブグラフがチェーンに追いついたかどうかを通知します。`health`は現在、エラーが発生していない場合は`healthy`、サブグラフの進行を停止させるエラーが発生した場合は`failed`という値を取ることができます。この場合、このエラーの詳細について`fatalError`フィールドを確認できます。 - -## ホスティングサービス・サブグラフ・アーカイブポリシー - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. 
- -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio・サブグラフ・アーカイブポリシー - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -このポリシーで影響を受けるすべてのサブグラフには、問題のバージョンを戻すオプションがあります。 diff --git a/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 2237ceff91c7..000000000000 --- a/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Subgraph Studio へのサブグラフのデプロイ ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Graph CLIをインストールする(yarnまたはnpmを使用) -- Subgraph Studio でサブグラフを作成する -- CLI から自分のアカウントを認証する -- Subgraph Studio へのサブグラフのデプロイ - -## Graph CLI のインストール - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. 
- -**yarn でインストールします:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**npm でインストールします:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Subgraph Studio でサブグラフを作成する - -実際のサブグラフをデプロイする前に、[Subgraph Studio](https://thegraph.com/studio/)でサブグラフを作成する必要があります。詳しくは、[Studio documentation](/deploying/subgraph-studio)をお読みになることをお勧めします。 - -## サブグラフの初期化 - -Subgraph Studio でサブグラフが作成されると、以下のコマンドでサブグラフのコードを初期化することができます: - -```bash -graph init --studio -``` - -``の値は、Subgraph Studio のサブグラフの詳細ページにあります: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -`graph init`を実行すると、クエリしたいコントラクトアドレス、ネットワーク、Abi の入力を求められます。これを行うと、サブグラフの作業を開始するためのいくつかの基本的なコードを含む新しいフォルダがローカルマシン上に生成されます。その後、サブグラフが期待通りに動作することを確認するために、サブグラフを最終的に完成させることができます。 - -## グラフ認証 - -Subgraph Studio にサブグラフをデプロイする前に、CLI で自分のアカウントにログインする必要があります。これを行うには、"My Subgraphs "ページまたはサブグラフの詳細ページにある deploy key が必要になります。 - -以下は、CLI から認証を行うために必要なコマンドです: - -```bash -graph auth --studio 」 -``` - -## Subgraph Studio へのサブグラフのデプロイ - -準備ができたら、Subgraph Studio にサブグラフをデプロイすることができます。これを行うと、分散型ネットワークにサブグラフが公開されず、テストやメタデータの更新が可能な Studio アカウントにのみデプロイされます。 - -以下は、サブグラフのデプロイに必要な CLI コマンドです。 - -```bash -graph deploy --studio -``` - -このコマンドを実行すると、CLI がバージョンラベルの入力を求めてきますので、`0.1`や`0.2`といったラベルを使ったり、`uniswap-v2-0.1`のようにアルファベットを使ったりと、好きなように名前を付けることができます。これらのラベルはグラフエクスプローラーに表示され、キュレーターがこのバージョンにシグナルを送りたいかどうかを判断するのに使われますので、賢く選択しましょう。 - -デプロイされた後は、Subgraph Studio でプレイグラウンドを使ってサブグラフをテストし、必要に応じて別のバージョンをデプロイし、メタデータを更新し、準備ができたら Graph Explorer にサブグラフを公開することができます。 diff --git a/website/pages/ja/deploying/hosted-service.mdx b/website/pages/ja/deploying/hosted-service.mdx deleted file mode 100644 index e4e0c5017f5e..000000000000 --- a/website/pages/ja/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: とはホストされたサービス? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. 
Please read more in the [Sunrise FAQ](/sunrise) - -このセクションでは、[ホストされたサービス](https://thegraph.com/hosted-service/)にサブグラフをデプロイする手順を説明します。 - -ホストされているサービスのアカウントをお持ちでない場合は、GitHub アカウントでサインアップできます。認証が完了すると、UI を介してサブグラフの作成を開始し、端末からそれらのデプロイを開始できます。ホストされたサービスは、Polygon、Gnosis Chain、BNB Chain、Optimism、Arbitrum などの多数のネットワークをサポートします。 - -包括的なリストについては、[サポートされているネットワーク](/developing/supported-networks/#hosted-service)をご覧ください。 - -## サブグラフの作成 - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### 既存のコントラクトから - -選択したネットワークにスマート コントラクトを既に展開している場合は、このコントラクトから新しいサブグラフをブートストラップすることが、ホストされるサービスを開始するための良い方法となる可能性があります。 - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -この場合の `` は GitHub ユーザーまたは組織名、`` はサブグラフの名前、`` です。 `graph init` がサンプル サブグラフ マニフェストを配置するディレクトリのオプションの名前です。 `` は、既存の契約のアドレスです。 `` は、コントラクトが存在するネットワークの名前です。 `` は、コントラクト ABI ファイルへのローカル パスです。 **`--network` と `--abi` はどちらもオプションです。** - -### サブグラフの例から - -`graph init`がサポートする 2 つ目のモードは、例となるサブグラフから新しいプロジェクトを作成することです。以下のコマンドがこれを行います: - -``` -graph init --from-example --product hosted-service / [] -``` - -サンプルのサブグラフは、Dani Grant による Gravity コントラクトをベースにしています。このコントラクトは、ユーザーのアバターを管理し、アバターが作成または更新されるたびに`NewGravatar`または`UpdateGravatar`イベントを発行します。サブグラフは、`Gravatar`エンティティをグラフノードストアに書き込み、イベントに応じてこれらが更新されるようにすることで、これらのイベントを処理します。[subgraph manifest](/developer/create-subgraph-hosted#the-subgraph-manifest)を見ると、スマートコントラクトからどのイベントに注意を払うべきか、マッピングなどがよくわかります。 - -### Proxy Contractから - -Proxyコントラクトを監視するためのサブグラフを作成するには、実装コントラクトのアドレスを指定してサブグラフを初期化します。初期化プロセスが完了したら、最後のステップとして、subgraph.yamlファイルのネットワーク名をProxyコントラクトのアドレスに更新します。以下のコマンドを使用します。 - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## ホスト型サービスでサポートされているネットワーク - -対応ネットワークの一覧は[こちら](/developing/supported-networks)で確認できます。 diff --git a/website/pages/ja/deploying/subgraph-studio.mdx b/website/pages/ja/deploying/subgraph-studio.mdx deleted file mode 100644 index a5076994535f..000000000000 --- a/website/pages/ja/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -新しいランチパッドの使い方👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Studio UI によるサブグラフの作成 -- CLI を使用したサブグラフのデプロイ -- スタジオ UI でサブグラフをリリースする -- プレイグラウンドでのテスト -- クエリ URL を使用してステージングに統合する -- 特定のサブグラフ用の API キーの作成と管理 - -Here in Subgraph Studio, you have full control over your subgraphs. 
Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -サブグラフへのクエリはクエリフィーを発生させ、グラフネットワーク上の[インデクサー](/network/indexing)への報酬とします。dapp の開発者やサブグラフの開発者は、この Studio を利用することで、より優れたサブグラフを構築し、自分やコミュニティのクエリを強化することができます。Studio は 5 つの主要部分で構成されています: - -- ユーザー アカウント コントロール -- 作成したサブグラフのリスト -- 特定のサブグラフのステータスを管理、詳細表示、視覚化するセクション -- サブグラフのクエリに必要な API キーを管理するセクション -- 請求書を管理するためのセクション - -## アカウントの作成方法 - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. サインインすると、アカウントのホームページに固有のデプロイキーが表示されます。これにより、サブグラフの公開や API キーと課金の管理を行うことができます。固有のデプロイキーは、漏洩したと思われる場合に再生成することができます。 - -## Subgraph Studio でサブグラフを作成する方法 - - - -## Subgraph と The Graph Network の互換性 - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- [サポートされているネットワーク](/developing/supported-networks)にインデックスを付ける -- 以下の機能のいずれも使用してはいけません: - - ipfs.cat & ipfs.map - - 致命的でないエラー - - Grafting - -The Graph Network の機能やネットワークは順次追加されていきます。 - -### サブグラフのライフサイクルフロー - -![サブグラフのライフサイクル](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. 
- -## Subgraph Studio でのサブグラフのテスト - -ネットワークに公開する前にサブグラフをテストしたい場合、Subgraph **Playground**でこれを行うか、ログを見ることができます。Subgraph のログは、サブグラフが失敗した場合に、**どこで**失敗したかを教えてくれます。 - -## Subgraph Studio でサブグラフを公開する - -ここまで来ました - おめでとうございます! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -以下のビデオの概要もご覧ください: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -インデクサーは、特定のブロック ハッシュの時点で必須のインデックス作成証明レコードを提出する必要があります。サブグラフの公開はチェーン上で実行されるアクションであるため、トランザクションが完了するまでに最大で数分かかる場合があることに注意してください。コントラクトを公開するために使用するアドレスは、将来のバージョンを公開できる唯一のアドレスになります。賢明に選択してください! - -キュレーションシグナルを持つサブグラフは、分散型ネットワーク上でインデックスを作成できるようにインデクサーに表示されます。サブグラフとシグナルを 1 つのトランザクションで公開できるので、サブグラフの最初のキュレーションシグナルをミントすることができ、ガス代を節約することができます。また、キュレーターが後から提供するシグナルに自分のシグナルを加えることで、自分のサブグラフが最終的にクエリを提供する確率が高くなります。 - -**サブグラフの公開が完了したところで、次にサブグラフの定期的な管理方法について説明します。**同期に失敗したサブグラフは、ネットワークに公開できないことに注意してください。これは通常、サブグラフにバグがあるためで、ログを見ればどこに問題があるかがわかります。 - -## CLI によるサブグラフのバージョン管理 - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. 
Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -なお、サブグラフの新バージョンをネットワークに公開する際には、コストがかかります。トランザクション手数料に加えて、開発者は自動移行するシグナルにかかるキュレーション税の一部を賄う必要があります。キュレーターがシグナルを出していない場合、サブグラフの新バージョンを公開することはできません。キュレーションのリスクについて、詳しくは[こちら](/network/curating)をご覧ください。 - -### サブグラフのバージョンの自動アーカイブ - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. 
- -![Subgraph Studio - アーカイブ解除](/img/Unarchive.png) diff --git a/website/pages/ja/developing/creating-a-subgraph.mdx b/website/pages/ja/developing/creating-a-subgraph.mdx deleted file mode 100644 index 35fd536b4e2c..000000000000 --- a/website/pages/ja/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1602 +0,0 @@ ---- -title: サブグラフの作成 ---- - -サブグラフは、ブロックチェーンからデータを抽出し、加工して保存し、GraphQLで簡単にクエリできるようにします。 - -![サブグラフの定義](/img/defining-a-subgraph.png) - -サブグラフの定義は、いくつかのファイルで構成されています。 - -- `subgraph.yaml`:サブグラフのマニフェストを含む YAML ファイル - -- `schema.graphql`: サブグラフにどのようなデータが保存されているか、また GraphQL を使ってどのようにクエリを行うかを定義する GraphQL スキーマ - -- `AssemblyScript Mappings`: イベントデータをスキーマで定義されたエンティティに変換する[AssemblyScript](https://github.com/AssemblyScript/assemblyscript)コード (例: このチュートリアルでは`mapping.ts`) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Graph CLI のインストール - -Graph CLI は JavaScript で書かれており、使用するには`yarn`または `npm`のいずれかをインストールする必要があります。 - -`yarn`をインストールしたら、次のコマンドを実行して Graph CLI をインストールする。 - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. 
If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## 既存のコントラクトから - -次のコマンドは、既存のコントラクトのすべてのイベントにインデックスを付けるサブグラフを作成します。Etherscan からコントラクト ABI をフェッチしようとしますが、ローカルファイルパスの要求にフォールバックします。オプションの引数のいずれかが欠けている場合は、対話形式で行われます。 - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -``は、Subgraph Studio でのサブグラフの ID で、サブグラフの詳細ページに記載されています。 - -## サブグラフの例から - -`graph init`がサポートする 2 つ目のモードは、例となるサブグラフから新しいプロジェクトを作成することです。以下のコマンドがこれを行います: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## 既存のサブグラフに新しいデータソースを追加する - -`v0.31.0` 以降、`graph-cli`は、`graph add` コマンドにより既存のサブグラフに新しいデータソースを追加することをサポートしました。 - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -`add` コマンドは Etherscan から ABI を取得し (`--abi` オプションで ABI パスが指定されていない限り)、 `graph init` コマンドが `dataSource` `--from-contract` を作成したのと同じ方法で新しい `dataSource` を作成してスキーマとマッピングをそれに従って更新します。 - -`--merge-entities` オプションは、開発者が `entity` と `event` の名前の衝突をどのように処理したいかを指定します。 - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -契約書の`address`は、該当するネットワークの`networks.json`に書き込まれることになります - -> **Note:** 対話型CLIを使用している場合、`graph init`を正常に実行した後、新しい`dataSource`を追加するよう促されます。 - -## サブグラフ・マニフェスト - -サブグラフ・マニフェスト`subgraph.yaml`は、サブグラフがインデックスするスマート・コントラクト、これらのコントラクトからのどのイベントに注目するか、そしてイベント・データをグラフ・ノードが保存するエンティティにどのようにマッピングするかを定義し、クエリを可能にします。サブグラフ・マニフェストの完全な仕様は、[こちら](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md)をご覧ください。 - -例のサブグラフの場合、`subgraph.yaml`は次のようになっています: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: 
UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -マニフェストを更新する重要な項目は以下の通りです: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: 使用されるすべての[feature](#experimental-features)名のリストです。 - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: サブグラフのソースとなるスマートコントラクトのアドレスと、使用するスマートコントラクトの abi です。アドレスはオプションで、省略すると、すべてのコントラクトからのマッチングイベントにインデックスを付けることができます。 - -- `dataSources.source.startBlock`: データソースがインデックス作成を開始するブロックの番号(オプション)です。ほとんどの場合、コントラクトが作成されたブロックの使用をお勧めします。 - -- `dataSources.source.endBlock`:データ ソースがインデックス作成を停止するブロックのオプション番号 (そのブロックを含む)。必要な最小仕様バージョン: `0.0.9` - -- `dataSources.context`:サブグラフマッピング内で使用できるキーと値のペア。`Bool`、`String`、`Int`、`Int8`、`BigDecimal`、`Bytes`、`List`、`BigInt`のような様々なデータ型をサポートしています。各変数は`type`と`data`を指定する必要がある。これらのコンテキスト変数は、マッピング・ファイルからアクセスすることができ、サブグラフ開発のためのより多くの設定可能なオプションを提供します。 - -- `dataSources.mapping.entities`: データソースがストアに書き込むエンティティです。各エンティティのスキーマは、schema.graphql ファイルで定義されます。 - -- `dataSources.mapping.abis`: ソースコントラクトおよびマッピング内から対話する他のスマートコントラクトのための 1 つまたは複数の名前付き ABI ファイルです。 - -- `dataSources.mapping.eventHandlers`: このサブグラフが反応するスマートコントラクトイベントと、これらのイベントをストア内のエンティティに変換するマッピング内のハンドラ(例では./src/mapping.ts)をリストアップします。 - -- `dataSources.mapping.callHandlers`: このサブグラフが反応するスマートコントラクト関数と、関数呼び出しの入力と出力をストア内のエンティティに変換するマッピング内のハンドラをリストアップします。 - -- `dataSources.mapping.blockHandlers`: このサブグラフが反応するブロックと、ブロックがチェーンに追加されたときに実行されるマッピング内のハンドラーをリストします。フィルターを使用しない場合、ブロック ハンドラーはすべてのブロックで実行されます。オプションの call-filter は、`filter` フィールドと `kind: call` をハンドラーに追加することで提供できます。これは、ブロックにデータ ソース コントラクトへの呼び出しが少なくとも 1 つ含まれている場合にのみ、ハンドラーを実行します。 - -単一のサブグラフは複数のスマートコントラクトからデータを索引化できます。`dataSources`配列に、索引化するデータが必要な各コントラクトのエントリを追加してください - -### Order of Triggering Handlers - -ブロック内のデータソースのトリガーは、以下のプロセスを使用して順序付けられます: - -1. イベントとコールのトリガーは、ブロック内のトランザクションインデックスで最初に並べられます。 -2. 同じトランザクション内のイベントトリガーとコールトリガーは、マニフェストで定義されている順序にしたがって、イベントトリガーが先、コールトリガーが後という規則で並べられます。 -3. 
ブロックトリガーは、イベントトリガーとコールトリガーの後に、マニフェストで定義されている順番で実行されます。 - -これらの順序規則は変更されることがあります。 - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
- -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
- -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. 
-- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. 
- -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| バージョン | リリースノート | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. 
| -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### ABI を取得する - -ABI ファイルは、契約内容と一致している必要があります。ABI ファイルを入手するにはいくつかの方法があります: - -- 自分のプロジェクトを構築している場合は、最新の ABI にアクセスできる可能性があります。 -- 公開プロジェクトのサブグラフを作成している場合は、そのプロジェクトをコンピュータにダウンロードし、 [`truffle compile`](https://truffleframework.com/docs/truffle/overview)または solc to compile を使用して ABI を取得することができます。 -- ABI は[Etherscan](https://etherscan.io/)にもありますが、アップロードされた ABI が古いかもしれないので、必ずしも信頼できるものではありません。正しい ABI でないと、サブグラフの実行に失敗します。 - -## GraphQL スキーマ - -サブグラフのスキーマは、`schema.graphql`というファイルにあります。GraphQL スキーマは、GraphQL インターフェース定義言語を用いて定義される。GraphQL スキーマを書いたことがない場合は、GraphQL の型システムについての入門書をご覧になることをお勧めします。GraphQL スキーマのリファレンスドキュメントは、[GraphQL API](/querying/graphql-api)のセクションにあります。 - -## エンティティの定義 - -エンティティを定義する前に、一歩下がって、データがどのように構造化され、リンクされているかを考えることが重要です。すべてのクエリは、サブグラフのスキーマで定義されたデータモデルと、サブグラフでインデックス化されたエンティティに対して行われます。このため、Dap のニーズに合わせてサブグラフ・スキーマを定義すると良いでしょう。エンティティは、イベントや関数ではなく、「データを含むオブジェクト」と考えるとよいでしょう。 - -The Graphでは、`schema.graphql`にエンティティタイプを定義するだけで、Graph Nodeがそのエンティティタイプのシングルインスタンスやコレクションを問い合わせるためのトップレベルのフィールドを生成してくれます。エンティティになるべき各タイプは、`@entity`ディレクティブでアノテーションされることが要求されます。デフォルトでは、エンティティはミュータブルです。つまり、マッピングは既存のエンティティをロードし、それを変更し、そのエンティティの新しいバージョンを保存することができます。Mutable には代償があり、例えば、チェーンからそのまま抽出されたデータを含むなど、決して変更されないことが分かっているエンティティタイプには、`@entity(immutable: true)` で immutable としてマークすることが推奨されます。マッピングは、エンティティが作成されたのと同じブロック内で変更が行われる限り、Immutableエンティティに変更を加えることができます。Immutableなエンティティは、書き込みや問い合わせが非常に高速になるため、可能な限り使用すべきです。 - -### 良い例 - -以下の`Gravatar`エンティティは、Gravatar オブジェクトを中心に構成されており、エンティティを定義する上での良い例です。 - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### 悪い例 - -`GravatarAccepted`エンティティと`GravatarDeclined`エンティティの例は、イベントに基づいています。イベントや関数の呼び出しとエンティティを 1:1 で対応させることはお勧めできません。 - -```graphql -type GravatarAccepted @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### 任意フィールドと必須フィールド - -エンティティのフィールドは、必須またはオプションとして定義できます。必須フィールドは、スキーマの中で `!`で示されます。マッピングで必須フィールドが設定されていない場合、フィールドを照会すると次のようなエラーが表示されます: - -``` -Null 以外のフィールド 'name' の null 値が解決されました -``` - -各エンティティには `id` フィールドが必要です。このフィールドは `Bytes!` または `String!` 型である必要があります。 `Bytes!` の ID を持つエンティティは書き込みが高速になるため、`ID` に人間が読み取れるテキストが含まれていない限り、通常は `Bytes!` を使用することをお勧めします。 `String!` `id` を持つものとしてクエリします。 `id` フィールドは主キーとして機能し、同じタイプのすべてのエンティティ間で一意である必要があります。歴史的な理由から、タイプ `ID!` も受け入れられ、`String!` と同義です。 - -例えば、`let id = left.id.concat(right.id)` は `left` と `right` のidからidを生成するために使用されます。同様に、既存のエンティティのidとカウンタ`count`からidを構成するには、`let id = left.id.concatI32(count)` を使うことができます。この連結は、`left`の長さが、例えば、`left.id`が`Address`であるように、全てのそうした実体に対して同じである限り、ユニークなidを作り出すことが保証されています。 - -### 組み込みの Scalar タイプ - -#### GraphQL がサポートする Scalar - -GraphQL API では、以下の Scalar をサポートしています: - -| タイプ | 説明書き | -| --- | --- | -| `Bytes` | Byte 配列で、16 進数の文字列で表されます。Ethereum のハッシュやアドレスによく使われます。 | -| `String` | `string`値の Scalar であり、Null 文字はサポートされておらず、自動的に削除されます。 | -| `Boolean` | `boolean`値を表す Scalar。 | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | 大きな整数。Ethereum の`uint32`, `int64`, `uint64`, ..., `uint256` タイプに使用されます。注: `int32`, `uint24` `int8`など`uint32`以下のものは`i32`として表現されます。 | -| `BigDecimal` | `BigDecimal`は、高精度の 10 進数を記号と指数で表します。指数の範囲は -6143 ~ +6144 です。有効数字 34 桁にまとめられます。 | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| - -#### Enums - -スキーマ内に enums を作成することもできます。enums は次のような構文になっています: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -スキーマで enum が定義されると、enum 値の文字列表現を使用してエンティティに enum フィールドを設定することができます。例えば、`tokenStatus`を`SecondOwner`に設定するには、まずエンティティを定義し、続いて`entity.tokenStatus = "SecondOwner`でフィールドを設定します。以下の例は、Token エンティティが enum フィールドを持つように見えることを示しています: - -enums の記述についての詳細は、[GraphQL documentation](https://graphql.org/learn/schema/)を参照してください。 - -#### エンティティのリレーションシップ - -エンティティは、スキーマ内の 1 つ以上の他のエンティティとリレーションシップを持つことができます。これらの関係は、クエリの中で走査されることがあります。The Graph のリレーションシップは単方向です。リレーションシップのどちらかの "端 "に単方向のリレーションシップを定義することで、双方向のリレーションシップをシミュレートすることができます。 - -リレーションシップは、指定されたタイプが他のエンティティのものであることを除けば、他のフィールドと同様にエンティティに定義されます。 - -#### 1 対 1 のリレーションシップ - -`TransactionReceipt`エンティティタイプとの 1 対 1 の関係を持つ`Transaction`エンティティタイプを定義します: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### 1 対多のリレーションシップ - -`TokenBalance`エンティティタイプに、Token エンティティタイプとの 1 対多の関係が必要なものを定義します: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### 逆引き(Reverse lookups) - -逆引きは、`@derivedFrom`フィールドを使ってエンティティに定義できます。これにより、エンティティ上に仮想的なフィールドが作成されます。このフィールドにはクエリをかけることができますが、マッピング API を通じて手動で設定することはできません。むしろ、他のエンティティで定義された関係から派生します。このような関係では、関係の両側を保存することに意味があることはほとんどありません。一方の側だけを保存し、もう一方の側を派生させた方が、インデックス作成とクエリのパフォーマンスの両方が向上します。 - -1 対多の関係では、関係は常に「1」側に格納され、「多」側は常に派生されるべきです。「多」側にエンティティの配列を格納するのではなく、このように関係を格納することで、サブグラフのインデックス作成と問い合わせの両方で劇的にパフォーマンスが向上します。一般的に、エンティティの配列を保存することは、現実的に可能な限り避けるべきです。 - -#### 例 - -`tokenBalances`フィールドを派生させることで、トークンの残高にアクセスできるようになります: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! 
@derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### 多対多のリレーションシップ - -ユーザーがそれぞれ任意の数の組織に所属しているような多対多の関係の場合、関係をモデル化する最も簡単な方法は、関係する 2 つのエンティティのそれぞれに配列として格納することですが、一般的には最もパフォーマンスの高い方法ではありません。対称的な関係であれば、関係の片側のみを保存する必要があり、もう片側は派生させることができます。 - -#### 例 - -`User`エンティティタイプから`Organization`エンティティタイプへの逆引きを定義します。以下の例では、`Organization`エンティティの中から`members`属性を検索することで実現しています。クエリでは、`User`の`organizations`フィールドは、ユーザの ID を含むすべての`Organization`エンティティを見つけることで解決されます。 - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -この関係を保存するためのより効率的な方法は、次のようなスキーマを持つ `User`/ `Organization`のペアごとに 1 つのエントリを持つマッピングテーブルを使用することです。 - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -このアプローチでは、例えばユーザーの組織を取得するために、クエリをさらに 1 つのレベルに下げる必要があります: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -このように多対多の関係をより精巧に保存する方法では、サブグラフに保存されるデータが少なくなるため、サブグラフのインデックス作成や問い合わせが劇的に速くなります。 - -#### スキーマへのコメントの追加 - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! 
-} -``` - -## フルテキスト検索フィールド(Full Text Search)の定義 - -フルテキスト検索クエリは、テキスト検索入力に基づいてエンティティをフィルタリングし、ランク付けします。フルテキストクエリは、インデックス化されたテキストデータと比較する前に、クエリテキストの入力をステム処理することで、類似した単語のマッチを返すことができます。 - -フルテキストクエリの定義には、クエリ名、テキストフィールドの処理に使用される言語辞書、結果の順序付けに使用されるランキングアルゴリズム、および検索に含まれるフィールドが含まれます。各フルテキスト・クエリは複数のフィールドにまたがることができますが、含まれるフィールドはすべて単一のエンティティ・タイプのものでなければなりません。 - -フルテキストクエリを追加するには、GraphQL スキーマにフルテキスト指示文を含む`_Schema_`タイプを記述します。 - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -例の`bandSearch`フィールドは、`name`、`description`、`bio`フィールドのテキスト文書に基づいて`Band`エンティティをフィルタリングするクエリで使用できます。全文検索 API の説明や詳しい使用例については、[GraphQL API - Queries](/querying/graphql-api#queries)を参照してください。 - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):**`specVersion` `0.0.4`以降では、subgraph manifest の`features`セクションで`fullTextSearch`を宣言する必要があります。 - -### 対応言語 - -異なる言語を選択すると、フルテキスト検索 API に決定的な影響を与えますが、場合によっては微妙な影響もあります。フルテキストクエリフィールドでカバーされるフィールドは、選択された言語のコンテキストで検査されるため、分析や検索クエリで生成される語彙は言語ごとに異なります。たとえば、サポートされているトルコ語辞書を使用した場合、"token "は "toke "にステム処理されますが、もちろん英語辞書では "token "にステム処理されます。 - -サポートされている言語の辞書: - -| コード | 辞書 | -| ------ | ------------ | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | ポルトガル語 | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### ランキングアルゴリズム - -サポートされている結果の順序付けのアルゴリズム: - -| アルゴリズム | 説明書き | -| ------------- | 
------------------------------------------------------------------- | -| rank | フルテキストクエリのマッチ品質 (0-1) を使用して結果を並べ替えます。 | -| proximityRank | ProximityRank rank に似ていますが、マッチの近接性も含みます。 | - -## マッピングの記述 - -マッピングは、特定のソースからデータを取得し、スキーマ内で定義されているエンティティに変換します。マッピングは、WASM ([WebAssembly](https://webassembly.org/)) にコンパイルできる [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) と呼ばれる [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) のサブセットで記述されます。 AssemblyScript は通常の TypeScript よりも厳密ですが、使い慣れた構文を提供します - -`subgraph.yaml`の`mapping.eventHandlers`で定義されている各イベントハンドラに対して、同じ名前のエクスポートされた関数を作成します。各ハンドラーは、処理される`event`の名前に対応するタイプの event という 1 つのパラメータを受け入れる必要があります。 - -例題のサブグラフでは、`src/mapping.ts`に`NewGravatar`と`UpdatedGravatar` イベントのハンドラが含まれています: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -最初のハンドラは、`NewGravatar`イベントを受け取り、`new Gravatar(event.params.id.toHex())`で新しい`Gravatar`エンティティを作成し、対応するイベント・パラメータを使ってエンティティ・フィールドを入力します。このエンティティのインスタンスは、`event.params.id.toHex()`の id 値を持つ変数`gravatar`で表されます。 - -2 番目のハンドラは、既存の`Gravatar`をグラフノードストアから読み込もうとします。もしまだ存在していなければ、オンデマンドで作成されます。エンティティは新しいイベント・パラメータに合わせて更新され、`gravatar.save()`を使ってストアに保存されます。 - -### 新規エンティティ作成時の推奨 ID - -It is highly recommended to use `Bytes` as the type for `id` fields, and only 
use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## コード生成 - -スマートコントラクト、イベント、エンティティを簡単かつタイプセーフに扱うために、Graph CLIはサブグラフのGraphQLスキーマとデータソースに含まれるコントラクトABIからAssemblyScriptタイプを生成することができます。 - -これを行うためには - -```sh -graph codegen [--output-dir ] [] -で行うことができます。 -``` - -しかし、ほとんどの場合、`package.json`によってサブグラフがあらかじめ設定されているので、以下のいずれかを実行するだけで同じことが実現できます: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -これにより、`subgraph.yaml` で言及されている ABI ファイル内のすべてのスマート コントラクトに対して AssemblyScript クラスが生成され、これらのコントラクトをマッピング内の特定のアドレスにバインドし、ブロックに対して読み取り専用のコントラクト メソッドを呼び出すことができます。処理されます。また、すべてのコントラクト イベントのクラスを生成して、イベント パラメータ、およびイベントの発生元のブロックとトランザクションに簡単にアクセスできるようにします。これらの型はすべて `//.ts` に書き込まれます。サブグラフの例では、これは `generated/Gravity/Gravity.ts` になり、マッピングでこれらの型を次のようにインポートできます。 - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -これに加えて、サブグラフの GraphQL スキーマのエンティティタイプごとに 1 つのクラスが生成される。これらのクラスは、タイプセーフなエンティティのロード、エンティティ・フィールドへのリード・ライト・アクセスのほか、エンティティをストアに書き込むための`save()`メソッドを提供する。すべてのエンティティ・クラスは`/schema.ts`に書き込まれ、マッパーは以下のようにしてインポートすることができます。 - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **注:** GraphQL スキーマやマニフェストに含まれる ABI を変更するたびに、コード生成を再実行する必要があります。また、サブグラフをビルドまたはディプロイする前に、少なくとも一度は実行する必要があります。 - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
- -## データソーステンプレート - -EVM 互換のスマート コントラクトの一般的なパターンは、レジストリ コントラクトまたはファクトリ コントラクトの使用です。1 つのコントラクトが、それぞれ独自の状態とイベントを持つ任意の数の他のコントラクトを作成、管理、または参照します。 - -これらのサブコントラクトのアドレスは、事前にわかっている場合とわかっていない場合があり、これらのコントラクトの多くは、時間の経過とともに作成および/または追加される可能性があります。このような場合、単一のデータ ソースまたは固定数のデータ ソースを定義することは不可能であり、より動的なアプローチ、つまり *データ ソース テンプレート*が必要とされるのはこのためです。 - -### メインコントラクトのデータソース - -まず、メインコントラクトの通常のデータソースを定義します。下のスニペットは [Uniswap](https://uniswap.org) exchange factory contract のデータソースの例を簡略化して示しています。`NewExchange(address,address)`イベントハンドラに注目してください。これはファクトリーコントラクトによってチェーン上に新しいエクスチェンジコントラクトが作成された際に発行されます。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### 動的に作成されるコントラクトのデータソーステンプレート - -次に、*data source templates*をマニフェストに追加します。データソース テンプレートは、`source`の下に定義済みのコントラクト アドレスがないことを除けば、通常のデータソースと同じです。一般的には、親コントラクトが管理または参照するサブコントラクトのタイプごとに 1 つのテンプレートを定義することになります。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... 
-templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### データソーステンプレートのインスタンス化 - -最後のステップでは、メインのコントラクト マッピングを更新して、テンプレートの 1 つからダイナミック データ ソース インスタンスを作成します。この例では、メインのコントラクトマッピングを変更して`Exchange`テンプレートをインポートし、`Exchange.create(address)`メソッドを呼び出して新しい Exchange コントラクトのインデックス作成を開始します。 - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **注:** 新しいデータ ソースは、それが作成されたブロックとそれに続くすべてのブロックの呼び出しとイベントのみを処理しますが、履歴データ (データなど) は処理しません。それは前のブロックに含まれています。 -> -> 以前のブロックに新しいデータソースに関連するデータが含まれている場合は、コントラクトの現在の状態を読み取り、新しいデータソースが作成された時点でその状態を表すエンティティを作成することで、そのデータにインデックスを付けることが最善です。 - -### データソースコンテクスト - -データソースコンテキストは、テンプレートをインスタンス化する際に追加の設定を渡すことができます。この例では、取引所が特定の取引ペアに関連付けられており、それが`NewExchange`イベントに含まれているとします。この情報は、インスタンス化されたデータソースに次のように渡すことができます。 - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -`Exchange`テンプレートのマッピングの中で、コンテキストにアクセスすることができます: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - 
-let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -すべての値の型に対して、`setString`や`getString`のようなセッターやゲッターがあります。 - -## スタートブロック(start Blocks) - -`startBlock`はオプションの設定で、データソースがチェーンのどのブロックからインデックス作成を開始するかを定義できます。開始ブロックを設定することで、データソースは無関係な何百万ものブロックをスキップすることができます。通常、サブグラフの開発者は`startBlock`をデータソースのスマートコントラクトが作成されたブロックに設定します。 - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **注:** コントラクト作成ブロックは、Etherscan ですばやく検索できます。 -> -> 1. 検索バーにアドレスを入力してコントラクトを検索します。 -> 2. `Contract Creator` セクションの作成トランザクションハッシュをクリックします。 -> 3. トランザクションの詳細ページを読み込んで、そのコントラクトの開始ブロックを見つけます。 - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. 
- -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. 
- -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. 
-``` - -## コールハンドラー - -イベントはコントラクトの状態に対する関連する変更を収集するための効果的な方法を提供しますが、多くのコントラクトはガスコストを最適化するためにログの生成を避けます。このような場合、サブグラフはデータソース・コントラクトに行われたコールを購読することができます。これは、関数シグネチャを参照するコールハンドラと、この関数へのコールを処理するマッピングハンドラを定義することで実現します。これらのコールを処理するために、マッピングハンドラは、コールへの入力とコールからの出力を型付けした`ethereum.Call`を引数として受け取ります。トランザクションのコールチェーンのどの深さで行われたコールでもマッピングがトリガーされ、プロキシコントラクトを介したデータソースコントラクトとのアクティビティをキャプチャすることができます。 - -コールハンドラーは、次の 2 つのケースのいずれかでのみトリガされます:指定された関数がコントラクト自身以外のアカウントから呼び出された場合、または Solidity で外部としてマークされ、同じコントラクト内の別の関数の一部として呼び出された場合。 - -> **Note:** コールハンドラは現在、ParityトレースAPIに依存しています。BNB chainやArbitrumのような特定のネットワークは、このAPIをサポートしていません。これらのネットワークのインデックスを持つサブグラフが1つ以上のコールハンドラを含む場合、同期を開始しません。サブグラフの開発者は、代わりにイベントハンドラを使用する必要があります。イベント・ハンドラはコール・ハンドラよりもはるかに高性能であり、すべてのevmネットワークでサポートされています。 - -### コールハンドラーの定義 - -マニフェストにコール ハンドラを定義するには、購読したいデータ ソースの下に `callHandlers`配列を追加します。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function`は、コールをフィルタリングするための正規化された関数シグネチャです。`handler`プロパティは、ターゲット関数がデータソースコントラクトで呼び出されたときに実行したい、マッピング内の関数の名前です。 - -### マッピング関数 - -各コールハンドラは、呼び出された関数の名前に対応するタイプを持つ 1 つのパラメータを取ります。上のサブグラフの例では、マッピングは`createGravatar` 関数が呼び出されたときのハンドラを含み、引数として`CreateGravatarCall`パラメータを受け取ります: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - 
-`handleCreateGravatar`関数は、`@graphprotocol/graph-ts`が提供する`ethereum.Call`のサブクラスである`CreateGravatarCall`を新たに受け取り、コールの型付けされた入出力を含みます。`CreateGravatarCall`のタイプは、`graph codegen`を実行したときに生成されます。 - -## ブロック・ハンドラー - -コントラクトイベントやファンクションコールの購読に加えて、サブグラフは、新しいブロックがチェーンに追加されると、そのデータを更新したい場合があります。これを実現するために、サブグラフは各ブロックの後、あるいは事前に定義されたフィルタにマッチしたブロックの後に、関数を実行することができます。 - -### 対応フィルター - -#### 通話フィルター - -```yaml -filter: - kind: call -``` - -_定義されたハンドラーは、ハンドラーが定義されているコントラクト(データソース)への呼び出しを含むすべてのブロックに対して一度だけ呼ばれます。_ - -> **Note:** コールハンドラは現在、ParityトレースAPIに依存しています。BNB chainやArbitrumのような特定のネットワークは、このAPIをサポートしていません。これらのネットワークのインデックスを持つサブグラフが1つ以上のコールハンドラを含む場合、同期を開始しません。サブグラフの開発者は、代わりにイベントハンドラを使用する必要があります。イベント・ハンドラはコール・ハンドラよりもはるかに高性能であり、すべてのevmネットワークでサポートされています。 - -ブロックハンドラーにフィルターがない場合、ハンドラーはブロックごとに呼び出されます。1 つのデータソースには、各フィルタータイプに対して 1 つのブロックハンドラーしか含めることができません。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### ポーリングフィルター - -> **`specVersion` >= 0.0.8 が必要です。** - -> **注:** ポーリング フィルタは、`kind: ethereum` の dataSource でのみ使用できます。 - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -定義されたハンドラーは、`n` ブロックごとに 1 回呼び出されます。`n` は、`every` フィールドで指定された値です。 この構成により、サブグラフが定期的なブロック間隔で特定の操作を実行できるようになります。 - -#### ワンスフィルター - -> **`specVersion` >= 0.0.8 が必要です。** - -> **注:** Once フィルタは、`kind: ethereum` の dataSource でのみ使用できます。 - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Once フィルターを使用して定義されたハンドラーは、他のすべてのハンドラーが実行される前に 1 回だけ呼び出されます。 この構成により、サブグラフはハンドラーを初期化ハンドラーとして使用し、インデックス作成の開始時に特定のタスクを実行できるようになります。 - -```ts -export function 
handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### マッピング関数 - -マッピング関数は、唯一の引数として`ethereum.Block`を受け取ります。イベント用のマッピング関数と同様に、この関数はストア内の既存のサブグラフエンティティにアクセスしたり、スマートコントラクトを呼び出したり、エンティティを作成または更新したりすることができます。 - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## 匿名イベント - -Solidity で匿名イベントを処理する必要がある場合は、例のようにイベントのトピック 0 を提供することで実現できます: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -シグネチャと topic0 の両方が一致した場合にのみ、イベントが発生します。デフォルトでは、`topic0`はイベントシグネチャのハッシュと同じです。 - -## イベントハンドラにおけるトランザクションレシーブ - -`specVersion` `0.0.5` および `apiVersion` `0.0.7` 以降、イベント ハンドラーは、それらを発行したトランザクション - -これを行うには、イベント ハンドラをサブグラフ マニフェストで新しい `receipt: true` キー (オプション、デフォルトは false) を使用して宣言する必要があります。 - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -ハンドラ関数の内部では、レシートは `Event.receipt` フィールドでアクセスすることができます。`receipt` キーが `false` に設定されているか、マニフェストで省略されている場合、代わりに `null` 値が返されることになります。 - -## 実験的機能 - -`specVersion` `0.0.4`以降、サブグラフ機能はマニフェストファイルのトップレベルにある`features`セクションで、以下の表のように`camelCase` の名前を使って明示的に宣言する必要があります: - -| 特徴 | 名前 | -| ---------------------------------------------------- | ---------------- | -| [致命的でないエラー](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -例えば、サブグラフが**Full-Text Search** と**Non-fatal Errors**の機能を使用する場合、マニフェストの`features`フィールドは次のようになります: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... 
-``` - -宣言せずに機能を使用すると、サブグラフの展開時に**validation error**が発生しますが、機能を宣言しても使用しなければエラーは発生しないことに注意してください。 - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. 
-- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### 致命的でないエラー - -すでに同期しているサブグラフのインデックスエラーは、デフォルトではサブグラフを失敗させ、同期を停止させます。サブグラフは、エラーが発生したハンドラーによる変更を無視することで、エラーが発生しても同期を継続するように設定することができます。これにより、サブグラフの作成者はサブグラフを修正する時間を得ることができ、一方でクエリは最新のブロックに対して提供され続けますが、エラーの原因となったバグのために結果が一貫していない可能性があります。なお、エラーの中には常に致命的なものもあり、致命的でないものにするためには、そのエラーが決定論的であることがわかっていなければなりません。 - -> **注:** グラフ ネットワークはまだ致命的ではないエラーをサポートしていないため、開発者はその機能を使用するサブグラフを Studio 経由でネットワークにデプロイしないでください。 - -非致命的エラーを有効にするには、サブグラフのマニフェストに以下の機能フラグを設定する必要があります: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - ... 
-``` - -クエリは、`subgraphError`引数を通じて、潜在的な不整合を持つデータのクエリをオプトインする必要があります。また、例のように、サブグラフがエラーをスキップしたかどうかを確認するために、`_meta`をクエリすることも推奨されます: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -サブグラフにエラーが発生した場合、そのクエリはデータと、`"indexing_error"`というメッセージを持つ graphql のエラーの両方を返します(以下のレスポンス例): - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### 既存のサブグラフへのグラフト - -> **注:** 最初に Graph Network にアップグレードするときにグラフティングを使用することはお勧めしません。 詳細については[こちら](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network)をご覧ください。 - -サブグラフが最初にデプロイされると、対応するチェーンのジェネシス ブロック (または各データ ソースで定義された `startBlock`) でイベントのインデックス作成が開始されます。既存のサブグラフのデータを再利用し、かなり後のブロックからインデックス作成を開始することは有益です。このインデックス作成モードは _グラフティング_ と呼ばれます。失敗した既存のサブグラフを迅速に、または一時的に再び機能させることができます。 - -`subgraph.yaml`のサブグラフマニフェストのトップレベルに`graft`ブロックがある場合、サブグラフはベースサブグラフにグラフトされます: - -```yaml -description: ... -graft: - base: Qm... 
# Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -マニフェストに`graft`ブロックが含まれるサブグラフがデプロイされると、グラフノードは `base` サブグラフのデータを、指定されたブロックまでコピーし、そのブロック以降の新しいサブグラフのインデックスを作成し続ける。ベースサブグラフは、対象となるグラフノードのインスタンス上に存在し、少なくとも与えられた`block`までのインデックスを持っている必要があります。このような制限があるため、グラフト化は開発時や緊急時に、グラフト化されていない同等のサブグラフの生成を早めるためにのみ使用するべきです。 - -グラフトはベースデータのインデックスではなくコピーを行うため、スクラッチからインデックスを作成するよりもサブグラフを目的のブロックに早く到達させることができますが、非常に大きなサブグラフの場合は最初のデータコピーに数時間かかることもあります。グラフトされたサブグラフが初期化されている間、グラフノードは既にコピーされたエンティティタイプに関する情報を記録します。 - -グラフト化されたサブグラフは、ベースとなるサブグラフのスキーマと同一ではなく、単に互換性のある GraphQL スキーマを使用することができます。また、それ自体は有効なサブグラフのスキーマでなければなりませんが、以下の方法でベースサブグラフのスキーマから逸脱することができます。 - -- エンティティタイプを追加または削除する -- エンティティタイプから属性を削除する -- エンティティタイプに nullable 属性を追加する -- null 化できない属性を null 化できる属性に変更する -- enums に値を追加する -- インターフェースの追加または削除 -- インターフェースがどのエンティティタイプに実装されるかを変更する - -> **[Feature Management](#experimental-features):**`grafting`はサブグラフマニフェストの`features`の下で宣言しなければなりません。 - -## IPFS/Arweave File Data Sources - -ファイルデータソースは、堅牢で拡張可能な方法でインデックス作成中にオフチェーンデータにアクセスするための新しいサブグラフ機能です。ファイルデータソースは、IPFS および Arweave からのファイルのフェッチをサポートしています。 - -> また、オフチェーンデータの決定論的なインデックス作成、および任意のHTTPソースデータの導入の可能性についても基礎ができました。 - -### 概要 - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. 
- -> 既存の`ipfs.cat` APIを置き換えるものです。 - -### アップグレードガイド - -#### `graph-ts` および `graph-cli` を更新しました。 - -ファイルデータソースは、graph-ts >=0.29.0 および graph-cli >=0.33.1 が必要です。 - -#### ファイルが見つかったときに更新される新しいエンティティタイプを追加します。 - -ファファイルが見つかったときに更新される新しいエンティティタイプを追加します。イルデータソースは、チェーンベースのエンティティにアクセスしたり更新することはできませんが、ファイル固有のエンティティを更新する必要があります。 - -これは、既存のエンティティからフィールドを分離し、別のエンティティにリンクさせることを意味します。 - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -親エンティティと結果のファイルデータソースエンティティの間の関係が1:1である場合、最も単純なパターンは、IPFS CIDをルックアップとして使用して、親エンティティを結果のファイルエンティティにリンクすることです。新しいファイルベースのエンティティのモデリングに問題がある場合は、Discordに連絡してください。 - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### `種類: ファイル/ipfs` または `種類: ファイル/arweave` の新しいテンプレートデータソースを追加します> - -目的のファイルが特定されたときに生成されるデータソースです。 - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> 現在、`abis`が必要ですが、ファイル・データ・ソース内からコントラクトを呼び出すことはできません。 - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### ファイルを処理するハンドラーを新規に作成 - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -読みやすい文字列としてのファイルのCIDは、`dataSource`を介して次のようにアクセスできます: - -```typescript -const cid = dataSource.stringParam() -``` - -ハンドラーの例: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### 必要なときにファイルデータソースを起動する - -チェーンベースハンドラーの実行中に、ファイルデータソースを作成できるようになりました: - -- 自動生成された`templates`からテンプレートをインポートする。 -- マッピング内から `TemplateName.create(cid: string)` を呼び出します。この場合、cid は IPFS または Arweave の有効なコンテンツ識別子です - -IPFS の場合、グラフノードは [v0 および v1 コンテンツ識別子、および](https://docs.ipfs.tech/concepts/content-addressing/)ディレクトリを持つコンテンツ識別子 (例: `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) をサポートします。 - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -例: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -これにより、新しいファイル データ ソースが作成され、グラフ ノードの構成済み IPFS または Arweave エンドポイントがポーリングされ、見つからない場合は再試行されます。ファイルが見つかると、ファイルデータソースハンドラが実行されます。 - -この例では、親 `Token` エンティティと結果の `TokenMetadata` エンティティの間のルックアップとして CID を使用しています。 - -> 以前は、サブグラフ開発者が`ipfs.cat(CID)`を呼び出してファイルを取得するポイントでした。 - -おめでとうございます!ファイルデータソースが使用できます。 - -#### サブグラフのデプロイ - -任意のグラフノードに`build`および`deploy`できるようになりました >=v0.30.0-rc.0. 
- -#### 制限事項 - -ファイルデータソースハンドラおよびエンティティは、他のサブグラフエンティティから分離され、実行時に決定論的であることを保証し、チェーンベースのデータソースを汚染しないことを保証します。具体的には、以下の通りです。 - -- ファイルデータソースで作成されたエンティティは不変であり、更新することはできません。 -- ファイルデータソースハンドラは、他のファイルデータソースのエンティティにアクセスすることはできません。 -- ファイルデータソースに関連するエンティティは、チェーンベースハンドラーからアクセスできません。 - -> この制約は、ほとんどのユースケースで問題になることはありませんが、一部のユースケースでは複雑さをもたらすかもしれません。ファイルベースのデータをサブグラフでモデル化する際に問題がある場合は、Discordを通じてご連絡ください。 - -また、オンチェーンデータソースや他のファイルデータソースからデータソースを作成することはできません。この制限は、将来的に解除される可能性があります。 - -#### ベストプラクティス - -NFT メタデータを対応するトークンにリンクする場合、メタデータの IPFS ハッシュを使用して、トークン エンティティから Metadata エンティティを参照します。IPFSハッシュをIDとして使用してMetadataエンティティを保存します。 - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -複数回リフレッシュされるエンティティがある場合は、IPFSハッシュ&スタンプとエンティティIDを使用して一意のファイルベースのエンティティを作成し、チェーンベースのエンティティ内の派生フィールドを使用してそれらを参照します。 - -> クエリが「最新版」のみを返すように、上記の推奨事項を改善するよう取り組んでいます。 - -#### 既知の問題点 - -ファイル データ ソースは現在、ABI が使用されていないにもかかわらず、ABI を必要とします ([issue](https://github.com/graphprotocol/graph-cli/issues/961))。回避策は、任意のABIを追加することです。 - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. 
- -#### 例 - -[クリプトコヴェン・サブグラフの移動](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### 参考文献 - -[GIPファイルデータソース](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ja/developing/creating-a-subgraph/_meta.js b/website/pages/ja/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ja/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ja/developing/graph-ts/_meta.js b/website/pages/ja/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ja/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ja/managing/deprecate-a-subgraph.mdx b/website/pages/ja/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ja/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. 
-- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ja/mips-faqs.mdx b/website/pages/ja/mips-faqs.mdx deleted file mode 100644 index b9d0538f7fa5..000000000000 --- a/website/pages/ja/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## イントロダクション - -> 注意:2023年5月をもって、MIPsプログラムは終了しました。参加してくれたすべてのインデクサーに感謝します! - -The Graph エコシステムに参加できるのは今がエキサイティングな時期です。 [Graph Day 2022](https://thegraph.com/graph-day/2022/) 中に、Yaniv Tal は [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/) を発表しました。 )、グラフ エコシステムが長年にわたって取り組んできた瞬間です。 - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -MIPsプログラムは、Indexersをサポートするためのインセンティブプログラムで、Ethereumメインネット以外のチェーンをインデックスするためのリソースを提供し、The Graphプロトコルを分散型ネットワークを多チェーンのインフラストラクチャレイヤーに拡張するのを支援します。 - -MIPsプログラムは、GRT供給量の0.75%(75M GRT)を割り当てており、ネットワークをブートストラップするのに貢献するIndexersに0.5%が割り当てられ、マルチチェーンサブグラフを使用するサブグラフ開発者向けのネットワークグラントに0.25%が割り当てられています。 - -### 役立つリソース - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. サブグラフが失敗した場合でも、有効なインデックス付け証明 (POI) を生成することは可能ですか? 
- -はい、確かにそうです。 - -文脈としては、仲裁憲章 [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) では、失敗したサブグラフの POI を生成する方法が指定されています。 - -コミュニティ メンバーである [SunTzu](https://github.com/suntzu93) は、仲裁憲章の方法論に準拠してこのプロセスを自動化するスクリプトを作成しました。 リポジトリは [here](https://github.com/suntzu93/get_valid_poi_subgraph) をご覧ください。 - -### 2. MIP プログラムはどのチェーンを最初に奨励しますか? - -分散型ネットワークでサポートされる最初のチェーンはGnosis Chainです!以前はxDAIとして知られていましたが、Gnosis ChainはEVMベースのチェーンです。Gnosis Chainは、ノードの実行のユーザーフレンドリーさ、Indexerの準備度、The Graphとの整合性、およびweb3内での採用に基づいて、最初に選ばれました。 - -### 3. 新しいチェーンはどのようにして MIP プログラムに追加されますか? - -新しいチェーンはMIPsプログラムを通じて発表され、Indexerの準備度、需要、およびコミュニティの意向に基づいています。チェーンはまずテストネットでサポートされ、その後、そのチェーンをメインネットでサポートするためのGIPが承認されます。MIPsプログラムに参加するIndexersは、どのチェーンをサポートしたいかを選択し、サブグラフの提供に対するネットワークでのクエリ料金やインデックスの報酬に加えて、各チェーンごとに報酬を獲得します。MIPsの参加者は、パフォーマンス、ネットワークのニーズへの対応能力、およびコミュニティの支持に基づいて評価されます。 - -### 4. ネットワークが新しいチェーンの準備ができたことをどのようにして知ることができますか? - -The Graph Foundationは、QoSパフォーマンスメトリクス、ネットワークのパフォーマンス、およびコミュニティチャネルを監視し、最適な準備状況を評価します。最優先事項は、マルチチェーンのdappがサブグラフを移行できるように、ネットワークがパフォーマンスの要件を満たすことです。 - -### 5. 報酬はチェーンごとにどのように分割されますか? - -各チェーンは、ノードの同期に必要な要件やクエリのボリューム、採用度に違いがあるため、各チェーンごとの報酬はそのチェーンのサイクルの最後に決定されます。これにより、すべてのフィードバックと学びを収集できます。ただし、いつでもIndexersはネットワークでサポートされると、クエリ料金とインデックスの報酬を獲得できるようになります。 - -### 6. MIP プログラム内のすべてのチェーンにインデックスを付ける必要がありますか、それとも 1 つのチェーンだけを選択してインデックスを付けることができますか? - -どのチェーンでもインデックスを行うことができます!MIPsプログラムの目標は、Indexersに希望するチェーンをインデックスし、興味を持つweb3エコシステムをサポートするためのツールと知識を提供することです。ただし、すべてのチェーンにはテストネットからメインネットまでのフェーズがあります。インデックスを行うチェーンのすべてのフェーズを完了するようにしてください。フェーズについて詳しくは[The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059)をご覧ください。 - -### 7. 報酬はいつ配布されますか? - -MIPsの報酬は、パフォーマンスメトリクスが達成され、移行されたサブグラフがそれらのIndexersによってサポートされると、チェーンごとに分配されます。チェーンのサイクルの中間点を通じて、各チェーンごとの総報酬に関する情報をお待ちください。 - -### 8. スコアリングはどのように行われますか? 
- -インデクサーは、プログラム全体のリーダーボードのスコアに基づいて報酬を求めて競い合います。 プログラムの採点は以下に基づいて行われます。 - -**Subgraph Coverage** - -- チェーンごとのサブグラフに最大限のサポートを提供していますか? - -- MIP 中に、大規模なインデクサーは、サポートするチェーンごとに 50% 以上のサブグラフをステークすることが期待されます。 - -**Quality Of Service** - -- インデクサーは、良好なサービス品質 (遅延、最新データ、稼働時間など) でチェーンにサービスを提供していますか? - -- インデクサーは dapp 開発者をサポートし、彼らのニーズに対応していますか? - -インデクサーは効率的に割り当てを行っており、ネットワーク全体の健全性に貢献していますか? - -**Community Support** - -- Indexer は他の Indexer と協力して、マルチチェーンのセットアップを支援していますか? - -- インデクサーはプログラム全体を通じてコア開発者にフィードバックを提供していますか、それともフォーラムでインデクサーと情報を共有していますか? - -### 9. Discord の役割はどのように割り当てられますか? - -モデレータは数日以内に役割を割り当てます。 - -### 10. テストネットでプログラムを開始してからメインネットに切り替えても問題ありませんか? 私のノードを特定して、報酬を配布する際に考慮してもらえますか? - -はい、実際にそれを行うことが期待されています。いくつかのフェーズはGörliで行われ、1つはメインネットで行われます。 - -### 11. どの時点で参加者がメインネットのデプロイメントを追加すると予想されますか? - -フェーズ 3 では、メインネット インデクサーが必要になります。これに関する詳細は、[shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. 報酬は権利確定の対象になりますか? - -プログラムの最後に分配される割合はベスティングの対象となります。詳細についてはインデクサー契約で共有されます。 - -### 13. 複数のメンバーがいるチームの場合、チームメンバー全員に MIPs Discord の役割が与えられますか? - -はい - -### 14. グラフ キュレーター プログラムのロックされたトークンを使用して MIP テストネットに参加することはできますか? - -はい - -### 15. MIP プログラム中に、無効な POI に対して異議を申し立てる期間はありますか? - -決まっていません。詳細については定期的にこのページを確認するか、緊急の場合はinfo@thegraph.foundationまでメールでお問い合わせください。 - -### 17. 2 つの権利確定契約を組み合わせることはできますか? - -いいえ。選択肢は以下の通りです:どちらか一方に委任するか、2つの別々のインデクサを実行することができます。 - -### 18. KYCに関する質問? - -Info@thegraph.foundation にメールしてください。 - -### 19. Gnosis チェーンにインデックスを付ける準備ができていません。準備ができたら、すぐに別のチェーンからインデックスを作成できますか? - -はい - -### 20. サーバーを実行するのに推奨されるリージョンはありますか? - -地域についての推奨事項は提供していません。場所を選ぶ際に考慮すべきポイントとして、仮想通貨の主要な市場がどこにあるかを考えることができます。 - -### 21.「ハンドラーガスコスト」とは何ですか? 
- -これはハンドラを実行するためのコストの決定論的な尺度です。名前から連想されるかもしれませんが、これはブロックチェーン上のガスコストとは関係ありません。 diff --git a/website/pages/ja/network/_meta.js b/website/pages/ja/network/_meta.js index 42f64bc63000..49858537c885 100644 --- a/website/pages/ja/network/_meta.js +++ b/website/pages/ja/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: '概要', } diff --git a/website/pages/ja/querying/_meta.js b/website/pages/ja/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ja/querying/_meta.js +++ b/website/pages/ja/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ja/querying/graph-client/_meta.js b/website/pages/ja/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ja/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ko/_meta.js b/website/pages/ko/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/ko/_meta.js +++ b/website/pages/ko/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/ko/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ko/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/ko/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. 
To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. 
- -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. 
- -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." 
-} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. 
On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. 
- -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. 
diff --git a/website/pages/ko/deploying/hosted-service.mdx b/website/pages/ko/deploying/hosted-service.mdx deleted file mode 100644 index 1ea86b96a573..000000000000 --- a/website/pages/ko/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Create a Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). 
diff --git a/website/pages/ko/deploying/subgraph-studio.mdx b/website/pages/ko/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/ko/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. - -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. 
- -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/ko/developing/creating-a-subgraph.mdx b/website/pages/ko/developing/creating-a-subgraph.mdx deleted file mode 100644 index e38d897919f8..000000000000 --- a/website/pages/ko/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. - -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query than those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore in a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id)`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ko/developing/creating-a-subgraph/_meta.js b/website/pages/ko/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ko/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ko/developing/graph-ts/_meta.js b/website/pages/ko/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ko/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ko/managing/deprecate-a-subgraph.mdx b/website/pages/ko/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ko/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ko/mips-faqs.mdx b/website/pages/ko/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/ko/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/ko/network/_meta.js b/website/pages/ko/network/_meta.js index 387f6792941e..49858537c885 100644 --- a/website/pages/ko/network/_meta.js +++ b/website/pages/ko/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: '개요', } diff --git a/website/pages/ko/querying/_meta.js b/website/pages/ko/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ko/querying/_meta.js +++ b/website/pages/ko/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ko/querying/graph-client/_meta.js b/website/pages/ko/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ko/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/mr/_meta.js b/website/pages/mr/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/mr/_meta.js +++ b/website/pages/mr/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export 
default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/mr/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/mr/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 09dfd5edc8af..000000000000 --- a/website/pages/mr/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: होस्ट केलेल्या सेवेसाठी सबग्राफ तैनात करणे ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## प्रवेश टोकन साठवा - -खाते तयार केल्यानंतर, तुमच्या [डॅशबोर्ड](https://thegraph.com/hosted-service/dashboard) वर नेव्हिगेट करा. डॅशबोर्डवर प्रदर्शित केलेले प्रवेश टोकन कॉपी करा आणि `graph auth --product hosted-service ` चालवा. हे आपल्या संगणकावर प्रवेश टोकन संचयित करेल. तुम्हाला हे फक्त एकदाच करावे लागेल, किंवा तुम्ही कधीही ऍक्सेस टोकन पुन्हा निर्माण केल्यास. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**प्रतिमा** - पूर्वावलोकन प्रतिमा आणि सबग्राफसाठी लघुप्रतिमा म्हणून वापरण्यासाठी प्रतिमा निवडा. 
- -**सबग्राफ नाव** - सबग्राफ तयार केलेल्या खात्याच्या नावासह, हे `account-name/subgraph-name`-शैली देखील परिभाषित करेल उपयोजन आणि GraphQL एंडपॉइंटसाठी वापरलेले नाव. _हे फील्ड नंतर बदलता येणार नाही._ - -**खाते** - सबग्राफ अंतर्गत तयार केलेले खाते. हे एखाद्या व्यक्तीचे किंवा संस्थेचे खाते असू शकते. _सबग्राफ नंतर खात्यांमध्ये हलवता येणार नाहीत._ - -**उपशीर्षक** - सबग्राफ कार्डमध्ये दिसणारा मजकूर. - -**वर्णन** - सबग्राफचे वर्णन, सबग्राफ तपशील पृष्ठावर दृश्यमान. - -**GitHub URL** - GitHub वरील सबग्राफ रेपॉजिटरीशी लिंक. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -तुम्ही `यार्न डिप्लॉय` चालवून सबग्राफ उपयोजित करता - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -एकदा ग्राफ नोडने ऐतिहासिक ब्लॉक्समधून सर्व डेटा काढल्यानंतर सबग्राफ स्थिती `सिंक` वर स्विच होते. ग्राफ नोड तुमच्या सबग्राफसाठी ब्लॉक्सची तपासणी करणे सुरू ठेवेल कारण हे ब्लॉक्स खनन केले जातात. - -## सबग्राफ पुन्हा तैनात करणे - -तुमच्या सबग्राफ व्याख्येमध्ये बदल करताना, उदाहरणार्थ, एंटिटी मॅपिंगमधील समस्येचे निराकरण करण्यासाठी, तुमच्या सबग्राफची अपडेट केलेली आवृत्ती उपयोजित करण्यासाठी वरील `यार्न डिप्लॉय` कमांड पुन्हा चालवा. 
सबग्राफच्या कोणत्याही अपडेटसाठी आवश्यक आहे की ग्राफ नोड तुमचा संपूर्ण सबग्राफ पुन्हा अनुक्रमित करेल, पुन्हा जेनेसिस ब्लॉकपासून सुरू होईल. - -तुमचा पूर्वी तैनात केलेला सबग्राफ अजूनही `सिंक करत आहे` स्थितीत असल्यास, तो ताबडतोब नवीन उपयोजित आवृत्तीसह बदलला जाईल. जर पूर्वी उपयोजित सबग्राफ आधीपासून पूर्णपणे समक्रमित केला असेल, तर ग्राफ नोड नवीन उपयोजित आवृत्तीला `प्रलंबित आवृत्ती` म्हणून चिन्हांकित करेल, पार्श्वभूमीमध्ये समक्रमित करेल आणि एकदा समक्रमित केल्यानंतर फक्त सध्या उपयोजित आवृत्ती नवीनसह पुनर्स्थित करेल. नवीन आवृत्ती पूर्ण झाली. हे सुनिश्चित करते की नवीन आवृत्ती समक्रमित होत असताना आपल्याकडे कार्य करण्यासाठी सबग्राफ आहे. - -## एकाधिक नेटवर्कवर सबग्राफ तैनात करणे - -काही प्रकरणांमध्ये, तुम्हाला समान सबग्राफ एकाधिक नेटवर्कवर त्याच्या कोडची नक्कल न करता उपयोजित करायचा असेल. यासह येणारे मुख्य आव्हान हे आहे की या नेटवर्कवरील कराराचे पत्ते वेगळे आहेत. - -### ग्राफ-क्ली वापरणे - -`ग्राफ बिल्ड` (`v0.29.0` पासून) आणि `graph deploy` (`v0.32.0` पासून) दोन नवीन पर्याय स्वीकारतात: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -तुम्‍ही सहज अपडेट करण्‍यासाठी `json` मानक फाइल (डिफॉल्‍ट `networks.json`) वरून नेटवर्क कॉन्फिगरेशन निर्दिष्ट करण्‍यासाठी `--network` पर्याय वापरू शकता विकासादरम्यान सबग्राफ. - -**टीप:** `init` कमांड आता प्रदान केलेल्या माहितीवर आधारित `networks.json` स्वयं-व्युत्पन्न करेल. त्यानंतर तुम्ही विद्यमान अद्ययावत करण्यात किंवा अतिरिक्त नेटवर्क जोडण्यास सक्षम असाल. 
- -तुमच्याकडे `networks.json` फाइल नसल्यास, तुम्हाला खालील संरचनेसह मॅन्युअली तयार करणे आवश्यक आहे: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**टीप:** तुम्हाला कॉन्फिगरेशन फाइलमध्ये कोणतेही `टेम्पलेट` (जर तुमच्याकडे असतील तर) निर्दिष्ट करण्याची गरज नाही, फक्त ` डेटास्रोत`. `subgraph.yaml` फाइलमध्ये घोषित केलेले कोणतेही `टेम्पलेट` असल्यास, त्यांचे नेटवर्क `--network` पर्यायासह निर्दिष्ट केलेल्या नेटवर्कवर स्वयंचलितपणे अद्यतनित केले जाईल. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -तुमची नेटवर्क कॉन्फिगरेशन फाइल अशी दिसली पाहिजे: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -आता आपण खालीलपैकी एक कमांड रन करू शकतो: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -आता तुम्ही `यार्न डिप्लॉय` करण्यासाठी तयार आहात. - -**टीप:** आधी सांगितल्याप्रमाणे, `graph-cli 0.32.0` पासून तुम्ही थेट `यार्न डिप्लॉय` चालवू शकता `--नेटवर्क` पर्याय: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### वापरत आहे subgraph.yaml टेम्पलेट - -जुन्या ग्राफ-क्ली आवृत्त्यांसाठी एक उपाय जो करार पत्त्यांसारख्या पैलूंचे पॅरामीटराइझ करण्याची परवानगी देतो तो म्हणजे [Mustache](https://mustache.github.io/) किंवा [हँडलबार](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -आणि - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -त्यासोबत, तुम्ही मॅनिफेस्टमधील नेटवर्कचे नाव आणि पत्ते बदलून व्हेरिएबल प्लेसहोल्डर्स `{{network}}` आणि `{{address}}` आणि मॅनिफेस्टचे नाव बदलून उदा. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -कोणत्याही नेटवर्कवर मॅनिफेस्ट जनरेट करण्‍यासाठी, तुम्ही `package.json` वर `मशी` अवलंबित्वासह दोन अतिरिक्त कमांड जोडू शकता: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -याचे एक कार्यरत उदाहरण [येथे](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759) आढळू शकते. - -**टीप:** हा दृष्टीकोन अधिक जटिल परिस्थितींमध्ये देखील लागू केला जाऊ शकतो, जेथे करार पत्ते आणि नेटवर्क नावांपेक्षा अधिक बदलणे आवश्यक आहे किंवा जेथे टेम्पलेट्समधून मॅपिंग किंवा ABI तयार करणे आवश्यक आहे. - -## सबग्राफ आरोग्य तपासत आहे - -जर सबग्राफ यशस्वीरित्या समक्रमित झाला, तर ते कायमचे चांगले चालत राहण्याचे चांगले चिन्ह आहे. तथापि, नेटवर्कवरील नवीन ट्रिगर्समुळे तुमच्या सबग्राफची चाचणी न केलेली त्रुटी स्थिती येऊ शकते किंवा कार्यप्रदर्शन समस्यांमुळे किंवा नोड ऑपरेटरमधील समस्यांमुळे ते मागे पडू शकते. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -हे तुम्हाला `chainHeadBlock` देईल ज्याची तुलना तुम्ही तुमच्या सबग्राफवरील `latestBlock` शी तुलना करू शकता की ते मागे आहे की नाही हे तपासण्यासाठी. `समक्रमित` सबग्राफ कधीही साखळीत अडकला असेल तर कळवतो. `आरोग्य` सध्या कोणतीही त्रुटी न आल्यास `हेल्दी` ची मूल्ये घेऊ शकते किंवा सबग्राफची प्रगती थांबवणारी त्रुटी असल्यास `अयशस्वी`. 
या प्रकरणात, या त्रुटीच्या तपशीलांसाठी तुम्ही `fatalError` फील्ड तपासू शकता. - -## होस्ट केलेले सेवा सबग्राफ संग्रहण धोरण - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## सबग्राफ स्टुडिओ सबग्राफ संग्रहण धोरण - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -या धोरणामुळे प्रभावित झालेल्या प्रत्येक सबग्राफला प्रश्नातील आवृत्ती परत आणण्याचा पर्याय आहे. 
diff --git a/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 5653a1339fe2..000000000000 --- a/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: सबग्राफ स्टुडिओमध्ये सबग्राफ तैनात करणे ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- ग्राफ सीएलआय स्थापित करा (यार्न किंवा एनपीएमसह) -- सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ तयार करा -- CLI वरून तुमचे खाते प्रमाणित करा -- सबग्राफ स्टुडिओमध्ये सबग्राफ तैनात करणे - -## आलेख CLI स्थापित करत आहे - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**यार्नसह स्थापित करा:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**सह स्थापित करा npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ तयार करा - -तुमचा वास्तविक सबग्राफ उपयोजित करण्यापूर्वी तुम्हाला [सबग्राफ स्टुडिओ](https://thegraph.com/studio/) मध्ये सबग्राफ तयार करणे आवश्यक आहे. याबद्दल अधिक जाणून घेण्यासाठी तुम्ही आमचे [स्टुडिओ दस्तऐवज](/deploying/subgraph-studio) वाचा अशी आम्ही शिफारस करतो. - -## तुमचा सबग्राफ सुरू करा - -एकदा तुमचा सबग्राफ सबग्राफ स्टुडिओमध्ये तयार झाला की तुम्ही या कमांडचा वापर करून सबग्राफ कोड सुरू करू शकता: - -```bash -आलेख init --studio -``` - -`` मूल्य सबग्राफ स्टुडिओमधील तुमच्या सबग्राफ तपशील पृष्ठावर आढळू शकते: - -![सबग्राफ स्टुडिओ - स्लग](/img/doc-subgraph-slug.png) - -`ग्राफ इनिट` चालवल्यानंतर, तुम्हाला कॉन्ट्रॅक्ट अॅड्रेस, नेटवर्क आणि एबीआय इनपुट करण्यास सांगितले जाईल ज्याची तुम्हाला क्वेरी करायची आहे. असे केल्याने तुमच्या स्थानिक मशीनवर तुमच्या सबग्राफवर काम सुरू करण्यासाठी काही मूलभूत कोडसह एक नवीन फोल्डर तयार होईल. त्यानंतर तुमचा सबग्राफ अपेक्षेप्रमाणे कार्य करतो याची खात्री करण्यासाठी तुम्ही अंतिम रूप देऊ शकता. 
- -## आलेख प्रमाणीकरण - -सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ उपयोजित करण्यास सक्षम होण्यापूर्वी, तुम्हाला CLI मध्ये तुमच्या खात्यात लॉग इन करणे आवश्यक आहे. हे करण्‍यासाठी, तुम्‍हाला तुमच्‍या डिप्लॉय की आवश्‍यक असेल जी तुम्‍हाला तुमच्‍या "माझे सबग्राफ" पृष्‍ठावर किंवा तुमच्‍या सबग्राफ तपशील पृष्‍ठावर मिळेल. - -CLI वरून प्रमाणीकरण करण्यासाठी तुम्हाला वापरण्याची आवश्यकता असलेली कमांड येथे आहे: - -```bash -graph auth --studio -``` - -## सबग्राफ स्टुडिओमध्ये सबग्राफ तैनात करणे - -तुम्ही तयार झाल्यावर, तुम्ही तुमचा सबग्राफ सबग्राफ स्टुडिओमध्ये तैनात करू शकता. असे केल्याने तुमचा सबग्राफ विकेंद्रित नेटवर्कवर प्रकाशित होणार नाही, तो फक्त तुमच्या स्टुडिओ खात्यावर तैनात करेल जिथे तुम्ही त्याची चाचणी करू शकाल आणि मेटाडेटा अपडेट करू शकाल. - -तुमचा सबग्राफ उपयोजित करण्यासाठी तुम्हाला वापरण्याची आवश्यकता असलेली CLI कमांड येथे आहे. - -```bash -graph deploy --studio -``` - -ही आज्ञा चालवल्यानंतर, CLI आवृत्ती लेबलसाठी विचारेल, तुम्हाला हवे तसे नाव देऊ शकता, तुम्ही `0.1` आणि `0.2` सारखी लेबले वापरू शकता किंवा अक्षरे देखील वापरू शकता. जसे की `uniswap-v2-0.1`. ती लेबले ग्राफ एक्सप्लोररमध्ये दृश्यमान असतील आणि क्युरेटर्सना या आवृत्तीवर सिग्नल द्यायचे आहेत की नाही हे ठरवण्यासाठी ते वापरले जाऊ शकतात, म्हणून ती हुशारीने निवडा. - -एकदा उपयोजित केल्यावर, तुम्ही खेळाच्या मैदानाचा वापर करून सबग्राफ स्टुडिओमध्ये तुमच्या सबग्राफची चाचणी करू शकता, आवश्यक असल्यास दुसरी आवृत्ती उपयोजित करू शकता, मेटाडेटा अपडेट करू शकता आणि तुम्ही तयार असाल तेव्हा, तुमचा सबग्राफ ग्राफ एक्सप्लोररवर प्रकाशित करा. diff --git a/website/pages/mr/deploying/hosted-service.mdx b/website/pages/mr/deploying/hosted-service.mdx deleted file mode 100644 index 8906fff59659..000000000000 --- a/website/pages/mr/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: होस्ट केलेली सेवा काय आहे? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. 
Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -सर्वसमावेशक सूचीसाठी, [समर्थित नेटवर्क](/developing/supported-networks/#hosted-service) पहा. - -## सबग्राफ तयार करा - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### विद्यमान करारातून - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---नेटवर्क \ ---abi \ -``` - -या प्रकरणात `` हे तुमचे GitHub वापरकर्ता किंवा संस्थेचे नाव आहे, `` हे तुमच्या सबग्राफचे नाव आहे आणि `` हे निर्देशिकेचे पर्यायी नाव आहे जेथे `graph init` उदाहरण सबग्राफ मॅनिफेस्ट ठेवेल. `` हा तुमच्या विद्यमान कराराचा पत्ता आहे. `` हे त्या नेटवर्कचे नाव आहे ज्यावर करार राहतो. `` हा करार ABI फाइलचा स्थानिक मार्ग आहे. 
**दोन्ही `--नेटवर्क` आणि `--abi` पर्यायी आहेत.** - -### सबग्राफच्या उदाहरणावरून - -दुसरा मोड `graph init` सपोर्ट करतो तो उदाहरण सबग्राफमधून नवीन प्रोजेक्ट तयार करतो. खालील कमांड हे करते: - -``` -आलेख आरंभ --from-उदाहरण --उत्पादन होस्टेड-सेवा / [<निर्देशिका>] -``` - -उदाहरण सबग्राफ हे Dani Grant च्या गुरुत्वाकर्षण करारावर आधारित आहे जे वापरकर्ता अवतार व्यवस्थापित करते आणि `NewGravatar` किंवा `UpdateGravatar` इव्हेंट जेव्हाही अवतार तयार किंवा अपडेट केले जातात. सबग्राफ ग्राफ नोड स्टोअरमध्ये `Gravatar` संस्था लिहून आणि हे इव्हेंटनुसार अपडेट केले जातील याची खात्री करून हे इव्हेंट हाताळते. तुमच्या स्मार्ट कॉन्ट्रॅक्टमधील कोणत्या इव्हेंटकडे लक्ष द्यावे, मॅपिंग आणि बरेच काही अधिक चांगल्या प्रकारे समजून घेण्यासाठी [सबग्राफ मॅनिफेस्ट](/developing/creating-a-subgraph#the-subgraph-manifest) वर सुरू ठेवा. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -तुम्ही समर्थित नेटवर्कची सूची [येथे](/developing/supported-networks) शोधू शकता. diff --git a/website/pages/mr/deploying/subgraph-studio.mdx b/website/pages/mr/deploying/subgraph-studio.mdx deleted file mode 100644 index b0e1393396ea..000000000000 --- a/website/pages/mr/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -तुमच्या नवीन लॉन्चपॅडवर स्वागत आहे 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- स्टुडिओ UI द्वारे सबग्राफ तयार करा -- CLI वापरून सबग्राफ उपयोजित करा -- स्टुडिओ UI सह सबग्राफ प्रकाशित करा -- खेळाच्या मैदानात त्याची चाचणी घ्या -- क्वेरी URL वापरून स्टेजिंगमध्ये समाकलित करा -- विशिष्ट सबग्राफसाठी तुमच्या API की तयार करा आणि व्यवस्थापित करा - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -सबग्राफ क्वेरी करणे क्वेरी फी व्युत्पन्न करते, जी ग्राफ नेटवर्कवर [इंडेक्सर्स](/network/indexing) ला बक्षीस देण्यासाठी वापरली जाते. तुम्ही dapp डेव्हलपर किंवा सबग्राफ डेव्हलपर असल्यास, स्टुडिओ तुम्हाला तुमच्या किंवा तुमच्या समुदायाच्या प्रश्नांना सामर्थ्य देण्यासाठी चांगले सबग्राफ तयार करण्यास सक्षम करेल. स्टुडिओमध्ये 5 मुख्य भाग आहेत: - -- तुमचे वापरकर्ता खाते नियंत्रणे -- तुम्ही तयार केलेल्या सबग्राफची सूची -- व्यवस्थापित करण्यासाठी, तपशील पाहण्यासाठी आणि विशिष्ट सबग्राफची स्थिती दृश्यमान करण्यासाठी विभाग -- तुमच्‍या API की व्‍यवस्‍थापित करण्‍यासाठी एक विभाग जो तुम्‍हाला सबग्राफ क्‍वेरी करण्‍याची आवश्‍यकता असेल -- तुमचे बिलिंग व्यवस्थापित करण्यासाठी विभाग - -## तुमचे खाते कसे तयार करावे - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. एकदा तुम्ही साइन इन केल्यानंतर, तुम्हाला तुमच्या खात्याच्या मुख्यपृष्ठावर तुमची अनन्य उपयोजन की दिसेल. हे तुम्हाला तुमचे सबग्राफ प्रकाशित करण्यास किंवा तुमच्या API की + बिलिंग व्यवस्थापित करण्यास अनुमती देईल. तुमच्याकडे एक अनन्य डिप्लॉय की असेल जी तुम्हाला वाटत असेल की ती तडजोड केली गेली असेल तर ती पुन्हा व्युत्पन्न केली जाऊ शकते. 
- -## How to Create a Subgraph in Subgraph Studio - - - -## ग्राफ नेटवर्कसह सबग्राफ सुसंगतता - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- [समर्थित नेटवर्क](/developing/supported-networks) अनुक्रमित करा -- खालीलपैकी कोणतीही वैशिष्ट्ये वापरू नयेत: - - ipfs.cat & ipfs.map - - गैर-घातक त्रुटी - - कलम करणे - -अधिक वैशिष्ट्ये & ग्राफ नेटवर्कमध्ये नेटवर्क वाढीवपणे जोडले जातील. - -### सबग्राफ जीवनचक्र प्रवाह - -![सबग्राफ लाइफसायकल](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## सबग्राफ स्टुडिओमध्ये तुमच्या सबग्राफची चाचणी करत आहे - -तुम्ही तुमचा सबग्राफ नेटवर्कवर प्रकाशित करण्यापूर्वी त्याची चाचणी घेऊ इच्छित असल्यास, तुम्ही हे सबग्राफ **प्लेग्राउंड** मध्ये करू शकता किंवा तुमचे लॉग पाहू शकता. सबग्राफ लॉग तुम्हाला सांगतील की तुमचा सबग्राफ अयशस्वी झाल्यास ते **कोठे** आहे. - -## सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ प्रकाशित करा - -तुम्ही इथपर्यंत पोहोचलात - अभिनंदन! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -खालील व्हिडिओ विहंगावलोकन देखील पहा: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. 
If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -इंडेक्सर्सना विशिष्ट ब्लॉक हॅश प्रमाणे इंडेक्सिंग रेकॉर्डचा अनिवार्य पुरावा सादर करणे आवश्यक आहे. सबग्राफ प्रकाशित करणे ही ऑन-चेन केलेली क्रिया असल्याने, लक्षात ठेवा की व्यवहार पूर्ण होण्यासाठी काही मिनिटे लागू शकतात. करार प्रकाशित करण्यासाठी तुम्ही वापरत असलेला कोणताही पत्ता भविष्यातील आवृत्त्या प्रकाशित करण्यास सक्षम असेल. हुशारीने निवडा! - -क्युरेशन सिग्नलसह सबग्राफ इंडेक्सर्सना दाखवले जातात जेणेकरून ते विकेंद्रित नेटवर्कवर अनुक्रमित केले जाऊ शकतात. तुम्ही एका व्यवहारात सबग्राफ आणि सिग्नल प्रकाशित करू शकता, जे तुम्हाला सबग्राफवर पहिले क्युरेशन सिग्नल मिंट करू देते आणि गॅसच्या खर्चात बचत करते. क्युरेटर्सनी नंतर प्रदान केलेल्या सिग्नलमध्ये तुमचा सिग्नल जोडून, तुमच्या सबग्राफमध्ये शेवटी क्वेरी पूर्ण करण्याची उच्च संधी असेल. - -**आता तुम्ही तुमचा सबग्राफ प्रकाशित केला आहे, चला ते नियमितपणे कसे व्यवस्थापित कराल ते पाहू या.** लक्षात ठेवा की जर तुमचा सबग्राफ असेल तर तुम्ही नेटवर्कवर प्रकाशित करू शकत नाही अयशस्वी समक्रमण. हे सहसा असे आहे कारण सबग्राफमध्ये बग आहेत - त्या समस्या कुठे अस्तित्वात आहेत हे लॉग तुम्हाला सांगतील! - -## CLI सह तुमच्या सबग्राफची आवृत्ती करणे - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. 
Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -कृपया लक्षात घ्या की नेटवर्कवर सबग्राफची नवीन आवृत्ती प्रकाशित करण्याशी संबंधित खर्च आहेत. व्यवहार शुल्काव्यतिरिक्त, विकसकांनी स्वयं-स्थलांतर सिग्नलवरील क्युरेशन कराचा एक भाग देखील निधी दिला पाहिजे. क्युरेटर्सनी त्यावर संकेत न दिल्यास तुम्ही तुमच्या सबग्राफची नवीन आवृत्ती प्रकाशित करू शकत नाही. क्युरेशनच्या जोखमींबद्दल अधिक माहितीसाठी, कृपया [येथे](/network/curating) अधिक वाचा. - -### सबग्राफ आवृत्त्यांचे स्वयंचलित संग्रहण - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![सबग्राफ स्टुडिओ -अनआर्काइव](/img/Unarchive.png) diff --git a/website/pages/mr/developing/creating-a-subgraph.mdx b/website/pages/mr/developing/creating-a-subgraph.mdx deleted file mode 100644 index e001436592e7..000000000000 --- a/website/pages/mr/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: सबग्राफ तयार करणे ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![सबग्राफ परिभाषित करणे](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: एक GraphQL स्कीमा जो तुमच्या सबग्राफसाठी कोणता डेटा संग्रहित केला जातो आणि GraphQL द्वारे त्याची क्वेरी कशी करावी हे परिभाषित करते - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -आलेख CLI JavaScript मध्ये लिहिलेले आहे, आणि ते वापरण्यासाठी तुम्हाला `यार्न` किंवा `npm` स्थापित करावे लागेल; असे गृहीत धरले जाते की तुमच्याकडे पुढील गोष्टींमध्ये सूत आहे. - -तुमच्याकडे `यार्न` आल्यावर, चालवून आलेख CLI स्थापित करा - -**यार्नसह स्थापित करा:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**सह स्थापित करा npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. 
If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -खालील कमांड एक सबग्राफ तयार करते जे विद्यमान कराराच्या सर्व घटनांना अनुक्रमित करते. ते इथरस्कॅन वरून ABI करार मिळवण्याचा प्रयत्न करते आणि स्थानिक फाइल मार्गाची विनंती करण्यासाठी परत येते. पर्यायी युक्तिवादांपैकी कोणतेही गहाळ असल्यास, ते तुम्हाला परस्परसंवादी फॉर्ममधून घेऊन जाते. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. - -## From An Example Subgraph - -दुसरा मोड `graph init` सपोर्ट करतो तो उदाहरण सबग्राफमधून नवीन प्रोजेक्ट तयार करतो. खालील कमांड हे करते: - -```sh -आलेख init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -`add` कमांड इथरस्कॅनमधून ABI आणेल (जोपर्यंत ABI पथ `--abi` पर्यायाने निर्दिष्ट केला जात नाही), आणि नवीन `डेटास्रोत` तयार करेल. > त्याच प्रकारे `graph init` कमांड `डेटास्रोत` `---करारातून` तयार करते, त्यानुसार स्कीमा आणि मॅपिंग अद्यतनित करते. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **टीप:** परस्परसंवादी क्ली वापरताना, यशस्वीरित्या `ग्राफ इनिट` चालवल्यानंतर, तुम्हाला एक नवीन `डेटास्रोत` जोडण्यासाठी सूचित केले जाईल. - -## सबग्राफ मॅनिफेस्ट - -सबग्राफ मॅनिफेस्ट `subgraph.yaml` स्मार्ट कॉन्ट्रॅक्ट्स तुमच्या सबग्राफ इंडेक्सेस परिभाषित करतो, या कॉन्ट्रॅक्टमधील कोणत्या इव्हेंट्सकडे लक्ष द्यायचे आणि ग्राफ नोड स्टोअर करत असलेल्या आणि क्वेरी करण्याची परवानगी देणार्‍या घटकांसाठी इव्हेंट डेटा कसा मॅप करायचा. सबग्राफ मॅनिफेस्टसाठी संपूर्ण तपशील [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) आढळू शकतात. 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -मॅनिफेस्टसाठी अद्यतनित करण्याच्या महत्त्वाच्या नोंदी आहेत: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `वैशिष्ट्ये`: सर्व वापरलेल्या [वैशिष्ट्य](#experimental-features) नावांची सूची. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: स्मार्ट कॉन्ट्रॅक्टचा पत्ता सबग्राफ स्त्रोत आणि वापरण्यासाठी स्मार्ट कॉन्ट्रॅक्टचा ABI. पत्ता ऐच्छिक आहे; ते वगळणे सर्व करारांमधून जुळणारे इव्हेंट अनुक्रमित करण्यास अनुमती देते. - -- `dataSources.source.startBlock`: ब्लॉकची पर्यायी संख्या ज्यावरून डेटा स्रोत अनुक्रमणिका सुरू करतो. बहुतेक प्रकरणांमध्ये, आम्ही ज्या ब्लॉकमध्ये करार तयार केला होता तो वापरण्याचा सल्ला देतो. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: स्त्रोत करारासाठी एक किंवा अधिक नावाच्या ABI फाइल्स तसेच तुम्ही मॅपिंगमधून परस्परसंवाद करता अशा इतर स्मार्ट करारांसाठी. - -- `dataSources.mapping.eventHandlers`: या सबग्राफवर प्रतिक्रिया देणारे स्मार्ट कॉन्ट्रॅक्ट इव्हेंट आणि मॅपिंगमधील हँडलर्सची यादी करते—./src/mapping.ts उदाहरणामध्ये—जे या इव्हेंटचे स्टोअरमधील घटकांमध्ये रूपांतर करतात. - -- `dataSources.mapping.callHandlers`: या सबग्राफवर प्रतिक्रिया देणारे स्मार्ट कॉन्ट्रॅक्ट फंक्शन्स सूचीबद्ध करते आणि मॅपिंगमधील हँडलर्स जे इनपुट आणि आउटपुटला स्टोअरमधील संस्थांमध्ये फंक्शन कॉलमध्ये रूपांतरित करतात. - -- `dataSources.mapping.blockHandlers`: हा सबग्राफ ज्या ब्लॉक्सवर प्रतिक्रिया देतो आणि मॅपिंगमधील हँडलर चेनमध्ये ब्लॉक जोडला जातो तेव्हा रन करण्यासाठी सूचीबद्ध करतो. फिल्टरशिवाय, ब्लॉक हँडलर प्रत्येक ब्लॉकला चालवला जाईल. 
हँडलरला `kind: call` सह `filter` फील्ड जोडून पर्यायी कॉल-फिल्टर प्रदान केले जाऊ शकते. ब्लॉकमध्ये डेटा स्रोत करारासाठी किमान एक कॉल असेल तरच हे हँडलर चालवेल. - -एकच सबग्राफ एकाधिक स्मार्ट कॉन्ट्रॅक्ट्समधील डेटा अनुक्रमित करू शकतो. प्रत्येक करारासाठी एक एंट्री जोडा ज्यामधून डेटा `डेटास्रोत` अॅरेमध्ये अनुक्रमित करणे आवश्यक आहे. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. समान व्यवहारामधील इव्हेंट आणि कॉल ट्रिगर्स एक नियम वापरून ऑर्डर केले जातात: प्रथम इव्हेंट ट्रिगर नंतर कॉल ट्रिगर, प्रत्येक प्रकार मॅनिफेस्टमध्ये परिभाषित केलेल्या क्रमाचा आदर करतो. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. 
This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. 
-- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. 
- -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously.
3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing.

### Example Configuration in Subgraph Manifest

Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`.

`Subgraph.yaml` using `event.address`:

```yaml
eventHandlers:
  - event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24)
    handler: handleSwap
    calls:
      global0X128: Pool[event.address].feeGrowthGlobal0X128()
      global1X128: Pool[event.address].feeGrowthGlobal1X128()
```

Details for the example above:

- `global0X128` is the declared `eth_call`.
- The text before the colon (`global0X128`) is the label for this `eth_call` which is used when logging errors.
- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)`
- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed.

`Subgraph.yaml` using `event.params`

```yaml
calls:
  - ERC20DecimalsToken0: ERC20[event.params.token0].decimals()
```

### SpecVersion Releases

| आवृत्ती | रिलीझ नोट्स |
| :-: | --- |
| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` |
| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. |
| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs |
| 0.0.9 | Supports `endBlock` feature |
| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter).
| -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### ABIs मिळवणे - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- तुम्ही सार्वजनिक प्रकल्पासाठी सबग्राफ तयार करत असल्यास, तुम्ही तो प्रकल्प तुमच्या संगणकावर डाउनलोड करू शकता आणि [`ट्रफल कंपाइल`](https://truffleframework.com/docs/truffle/overview) वापरून किंवा कंपाइल करण्यासाठी सोल्क वापरून ABI मिळवू शकता. -- तुम्ही [Etherscan](https://etherscan.io/) वर ABI देखील शोधू शकता, परंतु हे नेहमीच विश्वसनीय नसते, कारण तेथे अपलोड केलेला ABI कालबाह्य असू शकतो. तुमच्याकडे योग्य ABI असल्याची खात्री करा, अन्यथा तुमचा सबग्राफ चालवणे अयशस्वी होईल. - -## ग्राफक्यूएल स्कीमा - -तुमच्या सबग्राफसाठी स्कीमा `schema.graphql` फाइलमध्ये आहे. GraphQL स्कीमा GraphQL इंटरफेस व्याख्या भाषा वापरून परिभाषित केले आहेत. तुम्ही कधीही GraphQL स्कीमा लिहिला नसल्यास, GraphQL प्रकार प्रणालीवर हा प्राइमर तपासण्याची शिफारस केली जाते. GraphQL स्कीमासाठी संदर्भ दस्तऐवजीकरण [GraphQL API](/querying/graphql-api) विभागात आढळू शकते. - -## संस्था परिभाषित करणे - -संस्था परिभाषित करण्यापूर्वी, एक पाऊल मागे घेणे आणि तुमचा डेटा कसा संरचित आणि लिंक केलेला आहे याचा विचार करणे महत्त्वाचे आहे. सबग्राफ स्कीमामध्ये परिभाषित केलेल्या डेटा मॉडेल आणि सबग्राफद्वारे अनुक्रमित घटकांविरुद्ध सर्व क्वेरी केल्या जातील. यामुळे, सबग्राफ स्कीमा तुमच्या dapp च्या गरजांशी जुळेल अशा प्रकारे परिभाषित करणे चांगले आहे. इव्हेंट किंवा फंक्शन्स ऐवजी "डेटा असलेले ऑब्जेक्ट्स" म्हणून घटकांची कल्पना करणे उपयुक्त ठरू शकते. 
- -The Graph सह, तुम्ही फक्त `schema.graphql` मध्ये अस्तित्व प्रकार परिभाषित करता, आणि ग्राफ नोड एकल उदाहरणे आणि त्या घटक प्रकाराच्या संग्रहासाठी क्वेरी करण्यासाठी उच्च स्तरीय फील्ड तयार करेल. प्रत्येक प्रकार जो अस्तित्व असावा त्याला `@entity` निर्देशासह भाष्य करणे आवश्यक आहे. डीफॉल्टनुसार, घटक बदलण्यायोग्य असतात, याचा अर्थ मॅपिंग विद्यमान घटक लोड करू शकतात, त्यामध्ये सुधारणा करू शकतात आणि त्या घटकाची नवीन आवृत्ती संग्रहित करू शकतात. परिवर्तनशीलता किंमतीवर येते आणि ज्या घटकांसाठी हे ज्ञात आहे की ते कधीही सुधारित केले जाणार नाहीत, उदाहरणार्थ, त्यांच्यामध्ये केवळ साखळीतून शब्दशः काढलेला डेटा असतो, त्यांना `@entity सह अपरिवर्तनीय म्हणून चिन्हांकित करण्याची शिफारस केली जाते. (अपरिवर्तनीय: खरे)`. मॅपिंग अपरिवर्तनीय घटकांमध्ये बदल करू शकतात जोपर्यंत ते बदल त्याच ब्लॉकमध्ये होतात ज्यामध्ये अस्तित्व तयार केले गेले होते. अपरिवर्तनीय संस्था लिहिण्यासाठी आणि क्वेरी करण्यासाठी खूप वेगवान असतात आणि म्हणून जेव्हा शक्य असेल तेव्हा वापरल्या पाहिजेत. - -### उत्तम उदाहरण - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### वाईट उदाहरण - -खालील उदाहरण `GravatarAccepted` आणि `GravatarDeclined` घटक इव्हेंटवर आधारित आहेत. 1:1 घटकांना इव्हेंट किंवा फंक्शन कॉल मॅप करण्याची शिफारस केलेली नाही. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### पर्यायी आणि आवश्यक फील्ड - -घटक फील्ड आवश्यक किंवा पर्यायी म्हणून परिभाषित केले जाऊ शकतात. आवश्यक फील्ड स्कीमामधील `!` द्वारे सूचित केले जातात. 
मॅपिंगमध्ये आवश्यक फील्ड सेट केले नसल्यास, फील्डची चौकशी करताना तुम्हाला ही त्रुटी प्राप्त होईल: - -``` -नॉन-नल फील्ड 'नावासाठी शून्य मूल्य सोडवले -``` - -प्रत्येक घटकामध्ये एक `id` फील्ड असणे आवश्यक आहे, जे `Bytes!` किंवा `String!` प्रकारचे असणे आवश्यक आहे. सामान्यतः `बाइट्स!` वापरण्याची शिफारस केली जाते, जोपर्यंत `id` मध्ये मानवी वाचता येण्याजोगा मजकूर नसतो, कारण `Bytes!` आयडी असलेल्या संस्था लिहिण्यास जलद असतील. आणि `स्ट्रिंग!` `id` सह क्वेरी करा. `id` फील्ड प्राथमिक की म्हणून काम करते आणि समान प्रकारच्या सर्व घटकांमध्ये अद्वितीय असणे आवश्यक आहे. ऐतिहासिक कारणांसाठी, `ID!` हा प्रकार देखील स्वीकारला जातो आणि `String!` साठी समानार्थी शब्द आहे. - -काही घटक प्रकारांसाठी `id` हे इतर दोन घटकांच्या आयडीवरून तयार केले जाते; ते `concat` वापरून शक्य आहे, उदा., `left` च्या id वरून id तयार करण्यासाठी `let id = left.id.concat(right.id)` > आणि `उजवीकडे`. त्याचप्रमाणे, विद्यमान घटकाच्या आयडी आणि काउंटर `count` वरून आयडी तयार करण्यासाठी, `let id = left.id.concatI32(count)` वापरता येईल. अशा सर्व घटकांसाठी `left` ची लांबी समान असेल तोपर्यंत युनिक आयडी तयार करण्याची हमी दिली जाते, उदाहरणार्थ, `left.id` हा `पत्ता आहे `. - -### अंगभूत स्केलर प्रकार - -#### ग्राफक्यूएल समर्थित स्केलर - -We support the following scalars in our GraphQL API: - -| प्रकार | वर्णन | -| --- | --- | -| `बाइट्स` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `स्ट्रिंग` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `बुलियन` | `बूलियन` मूल्यांसाठी स्केलर. | -| `इंट` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | मोठे पूर्णांक. इथरियमच्या `uint32`, `int64`, `uint64`, ..., `uint256` प्रकारांसाठी वापरले जाते. 
टीप: `uint32` खाली सर्व काही, जसे की `int32`, `uint24` किंवा `int8` `i32` म्हणून प्रस्तुत केले जाते. |
| `बिग डेसिमल` | `BigDecimal` उच्च सुस्पष्टता दशांश एक महत्त्वपूर्ण आणि घातांक म्हणून प्रस्तुत केले जाते. घातांक श्रेणी −6143 ते +6144 पर्यंत आहे. 34 लक्षणीय अंकांपर्यंत पूर्णांक. |
| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. |

#### एनम्स

You can also create enums within a schema. Enums have the following syntax:

```graphql
enum TokenStatus {
  OriginalOwner
  SecondOwner
  ThirdOwner
}
```

एकदा enum स्कीमामध्ये परिभाषित केल्यावर, तुम्ही enum मूल्याचे स्ट्रिंग प्रतिनिधित्व एखाद्या घटकावर enum फील्ड सेट करण्यासाठी वापरू शकता. उदाहरणार्थ, तुम्ही प्रथम तुमची संस्था परिभाषित करून आणि नंतर `entity.tokenStatus = "SecondOwner"` सह फील्ड सेट करून `tokenStatus` ला `SecondOwner` वर सेट करू शकता. इनम फील्डसह टोकन अस्तित्व कसे दिसेल हे खालील उदाहरण दाखवते:

एनम्स लिहिण्याबद्दल अधिक तपशील [GraphQL दस्तऐवजीकरण](https://graphql.org/learn/schema/) मध्ये आढळू शकतात.

#### अस्तित्व संबंध

एखाद्या घटकाचा तुमच्या स्कीमामधील एक किंवा अधिक इतर घटकांशी संबंध असू शकतो. हे नातेसंबंध तुमच्या प्रश्नांमध्ये असू शकतात. आलेखामधील संबंध दिशाहीन आहेत. नात्याच्या "शेवट" वर एकदिशात्मक संबंध परिभाषित करून द्विदिशात्मक संबंधांचे अनुकरण करणे शक्य आहे.

Relationships are defined on entities just like any other field except that the type specified is that of another entity.

#### वन-टू-वन संबंध

Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type:

```graphql
type Transaction @entity(immutable: true) {
  id: Bytes!
  transactionReceipt: TransactionReceipt
}

type TransactionReceipt @entity(immutable: true) {
  id: Bytes!
- transaction: Transaction -} -``` - -#### एक-ते-अनेक संबंध - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### उलटे लुकअप - -`@derivedFrom` फील्डद्वारे एखाद्या घटकावर उलट लुकअप परिभाषित केले जाऊ शकतात. हे घटकावर एक आभासी फील्ड तयार करते ज्याची चौकशी केली जाऊ शकते परंतु मॅपिंग API द्वारे व्यक्तिचलितपणे सेट केली जाऊ शकत नाही. उलट, ते इतर घटकावर परिभाषित केलेल्या नातेसंबंधातून प्राप्त झाले आहे. अशा संबंधांसाठी, नातेसंबंधाच्या दोन्ही बाजू संचयित करणे क्वचितच अर्थपूर्ण आहे आणि अनुक्रमणिका आणि क्वेरी कार्यप्रदर्शन दोन्ही चांगले होईल जेव्हा फक्त एक बाजू संग्रहित केली जाते आणि दुसरी साधित केली जाते. - -एक-ते-अनेक संबंधांसाठी, संबंध नेहमी 'एका' बाजूला साठवले पाहिजेत आणि 'अनेक' बाजू नेहमी काढल्या पाहिजेत. 'अनेक' बाजूंवर संस्थांचा अ‍ॅरे संचयित करण्याऐवजी अशा प्रकारे नातेसंबंध संचयित केल्याने, अनुक्रमणिका आणि सबग्राफ क्वेरी या दोन्हीसाठी नाटकीयरित्या चांगले कार्यप्रदर्शन होईल. सर्वसाधारणपणे, घटकांचे अ‍ॅरे संग्रहित करणे जितके व्यावहारिक आहे तितके टाळले पाहिजे. - -#### उदाहरण - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### अनेक-ते-अनेक संबंध - -अनेक-ते-अनेक नातेसंबंधांसाठी, जसे की वापरकर्ते जे कोणत्याही संस्थेशी संबंधित असू शकतात, सर्वात सरळ, परंतु सामान्यतः सर्वात कार्यक्षम नसतात, संबंध मॉडेल करण्याचा मार्ग समाविष्ट असलेल्या दोन घटकांपैकी प्रत्येकामध्ये एक अॅरे आहे. नातेसंबंध सममितीय असल्यास, नातेसंबंधाची फक्त एक बाजू संग्रहित करणे आवश्यक आहे आणि दुसरी बाजू मिळवता येते. - -#### उदाहरण - -`वापरकर्ता` घटक प्रकारापासून `संस्थे` घटक प्रकारापर्यंत रिव्हर्स लुकअप परिभाषित करा. 
खालील उदाहरणामध्ये, हे `संस्था` घटकामधील `सदस्य` विशेषता शोधून साध्य केले जाते. क्वेरींमध्ये, `वापरकर्ता` वरील `संस्था` फील्ड वापरकर्त्याचा आयडी समाविष्ट असलेल्या सर्व `संस्था` घटक शोधून सोडवले जाईल. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -अनेक-ते-अनेक संबंध संचयित करण्याच्या या अधिक विस्तृत मार्गामुळे सबग्राफसाठी कमी डेटा संग्रहित केला जाईल आणि म्हणूनच अनुक्रमणिका आणि क्वेरीसाठी नाटकीयरित्या वेगवान असलेल्या सबग्राफमध्ये. - -#### स्कीमामध्ये टिप्पण्या जोडत आहे - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## पूर्ण मजकूर शोध फील्ड परिभाषित करणे - -मजकूर शोध इनपुटवर आधारित फुलटेक्स्ट शोध क्वेरी फिल्टर आणि रँक घटक. 
फुलटेक्स्ट क्वेरी अनुक्रमित मजकूर डेटाशी तुलना करण्यापूर्वी स्टेममध्ये क्वेरी मजकूर इनपुटवर प्रक्रिया करून समान शब्दांसाठी जुळणी परत करण्यास सक्षम आहेत. - -पूर्ण मजकूर क्वेरी व्याख्येमध्ये क्वेरीचे नाव, मजकूर फील्डवर प्रक्रिया करण्यासाठी वापरला जाणारा भाषा शब्दकोष, परिणाम ऑर्डर करण्यासाठी वापरले जाणारे रँकिंग अल्गोरिदम आणि शोधामध्ये समाविष्ट फील्ड समाविष्ट असतात. प्रत्येक फुलटेक्स्ट क्वेरी एकाधिक फील्डमध्ये असू शकते, परंतु सर्व समाविष्ट फील्ड एकाच घटक प्रकारातील असणे आवश्यक आहे. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[वैशिष्ट्य व्यवस्थापन](#experimental-features):** `specVersion` `0.0.4 आणि पुढे, सबग्राफ मॅनिफेस्टमधील वैशिष्ट्ये` विभागांतर्गत `fullTextSearch` घोषित करणे आवश्यक आहे. - -### भाषा समर्थित - -भिन्न भाषा निवडल्याने पूर्ण मजकूर शोध API वर निश्चित, काहीवेळा सूक्ष्म असले तरी परिणाम होईल. फुलटेक्स्ट क्वेरी फील्डद्वारे कव्हर केलेले फील्ड निवडलेल्या भाषेच्या संदर्भात तपासले जातात, म्हणून विश्लेषण आणि शोध क्वेरीद्वारे तयार केलेले लेक्सेम भाषेनुसार भिन्न असतात. उदाहरणार्थ: समर्थित तुर्की शब्दकोश वापरताना "टोकन" हे "टोक" वर स्टेम केले जाते, तर अर्थातच, इंग्रजी शब्दकोश "टोकन" वर स्टेम करेल. 
- -समर्थित भाषा शब्दकोश: - -| कोड | शब्दकोश | -| ---- | ---------- | -| सोपे | सामान्य | -| da | डॅनिश | -| nl | डच | -| en | इंग्रजी | -| fi | फिनिश | -| fr | फ्रेंच | -| de | जर्मन | -| hu | हंगेरियन | -| ते | इटालियन | -| नाही | नॉर्वेजियन | -| pt | पोर्तुगीज | -| ro | रोमानियन | -| ru | रशियन | -| es | स्पॅनिश | -| sv | स्वीडिश | -| tr | तुर्की | - -### रँकिंग अल्गोरिदम - -परिणाम ऑर्डर करण्यासाठी समर्थित अल्गोरिदम: - -| अल्गोरिदम | वर्णन | -| ------------- | ---------------------------------------------------------------------- | -| रँक | निकाल ऑर्डर करण्यासाठी फुलटेक्स्ट क्वेरीची जुळणी गुणवत्ता (0-1) वापरा. | -| proximityRank | रँक प्रमाणेच पण सामन्यांच्या समीपतेचाही समावेश आहे. | - -## मॅपिंग लेखन - -मॅपिंग्स एका विशिष्ट स्त्रोताकडून डेटा घेतात आणि आपल्या स्कीमामध्ये परिभाषित केलेल्या घटकांमध्ये त्याचे रूपांतर करतात. मॅपिंग्स [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) नावाच्या [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) नावाच्या उपसंचात लिहिल्या जातात जे WASM ([WebAssembly](https://webassembly.org/)) मध्ये संकलित केले जाऊ शकतात. असेंबलीस्क्रिप्ट सामान्य TypeScript पेक्षा कठोर आहे, तरीही एक परिचित वाक्यरचना प्रदान करते. - -`mapping.eventHandlers` अंतर्गत `subgraph.yaml` मध्ये परिभाषित केलेल्या प्रत्येक इव्हेंट हँडलरसाठी, त्याच नावाचे निर्यात केलेले कार्य तयार करा. प्रत्येक हँडलरने हाताळल्या जात असलेल्या इव्हेंटच्या नावाशी संबंधित प्रकारासह `इव्हेंट` नावाचा एकच पॅरामीटर स्वीकारला पाहिजे. 
- -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -पहिला हँडलर `NewGravatar` इव्हेंट घेतो आणि `नवीन Gravatar(event.params.id.toHex())` सह एक नवीन `Gravatar` अस्तित्व तयार करतो, पॉप्युलेट करतो संबंधित इव्हेंट पॅरामीटर्स वापरून संस्था फील्ड. हे घटक उदाहरण `event.params.id.toHex()` च्या id मूल्यासह, व्हेरिएबल `gravatar` द्वारे प्रस्तुत केले जाते. - -दुसरा हँडलर ग्राफ नोड स्टोअरमधून विद्यमान `Gravatar` लोड करण्याचा प्रयत्न करतो. ते अद्याप अस्तित्वात नसल्यास, ते मागणीनुसार तयार केले जाते. नंतर `gravatar.save()` वापरून स्टोअरमध्ये परत सेव्ह करण्यापूर्वी नवीन इव्हेंट पॅरामीटर्सशी जुळण्यासाठी घटक अपडेट केला जातो. - -### नवीन संस्था तयार करण्यासाठी शिफारस केलेले आयडी - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. 
Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## कोड जनरेशन - -स्मार्ट कॉन्ट्रॅक्ट्स, इव्हेंट्स आणि संस्थांसोबत काम करणे सोपे आणि टाइप-सुरक्षित करण्यासाठी, ग्राफ CLI सबग्राफच्या GraphQL स्कीमा आणि डेटा स्रोतांमध्ये समाविष्ट केलेल्या कॉन्ट्रॅक्ट ABIs मधून असेंबलीस्क्रिप्ट प्रकार व्युत्पन्न करू शकतो. - -यासह केले जाते - -```sh -graph codegen [--output-dir ] [] -``` - -परंतु बर्‍याच प्रकरणांमध्ये, सबग्राफ आधीपासूनच `package.json` द्वारे कॉन्फिगर केलेले असतात जे तुम्हाला ते साध्य करण्यासाठी खालीलपैकी एक चालवण्याची परवानगी देतात: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -हे `subgraph.yaml` मध्ये नमूद केलेल्या ABI फाइल्समधील प्रत्येक स्मार्ट करारासाठी असेंब्लीस्क्रिप्ट क्लास तयार करेल, ज्यामुळे तुम्हाला हे करार मॅपिंगमधील विशिष्ट पत्त्यांवर बंधनकारक करता येतील आणि ब्लॉकच्या विरूद्ध केवळ-वाचनीय करार पद्धती कॉल करता येतील. प्रक्रिया केली. 
हे प्रत्येक कॉन्ट्रॅक्ट इव्हेंटसाठी इव्हेंट पॅरामीटर्स तसेच ब्लॉक आणि इव्हेंटमधून उद्भवलेल्या व्यवहारासाठी सुलभ प्रवेश प्रदान करण्यासाठी एक वर्ग देखील तयार करेल. हे सर्व प्रकार `//.ts` वर लिहिलेले आहेत. उदाहरणाच्या सबग्राफमध्ये, हे `generated/Gravity/Gravity.ts` असेल, मॅपिंगना हे प्रकार आयात करण्यास अनुमती देतात. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -या व्यतिरिक्त, सबग्राफच्या GraphQL स्कीमामध्ये प्रत्येक घटक प्रकारासाठी एक वर्ग तयार केला जातो. हे वर्ग टाइप-सेफ एंटिटी लोडिंग, एंटिटी फील्डमध्ये वाचन आणि लिहिण्याचा अॅक्सेस प्रदान करतात तसेच स्टोअर करण्यासाठी संस्था लिहिण्यासाठी `सेव्ह()` पद्धत प्रदान करतात. सर्व घटक वर्ग `/schema.ts` वर लिहिलेले आहेत, मॅपिंगना ते यासह आयात करण्यास अनुमती देतात - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **टीप:** मॅनिफेस्टमध्ये समाविष्ट केलेल्या GraphQL स्कीमा किंवा ABI मध्ये प्रत्येक बदलानंतर कोड जनरेशन पुन्हा केले जाणे आवश्यक आहे. सबग्राफ तयार करण्यापूर्वी किंवा उपयोजित करण्यापूर्वी ते किमान एकदा केले जाणे आवश्यक आहे. - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## डेटा स्रोत टेम्पलेट्स - -EVM-सुसंगत स्मार्ट कॉन्ट्रॅक्ट्समधील एक सामान्य नमुना म्हणजे नोंदणी किंवा फॅक्टरी कॉन्ट्रॅक्टचा वापर, जिथे एक करार तयार करतो, व्यवस्थापित करतो किंवा इतर करारांची अनियंत्रित संख्या संदर्भित करतो ज्या प्रत्येकाची स्वतःची स्थिती आणि घटना असतात. - -या उप-करारांचे पत्ते कदाचित माहीत नसतील किंवा नसतील आणि यापैकी बरेच करार तयार केले जाऊ शकतात आणि/किंवा कालांतराने जोडले जाऊ शकतात. म्हणूनच, अशा प्रकरणांमध्ये, एकल डेटा स्रोत किंवा डेटा स्त्रोतांची निश्चित संख्या परिभाषित करणे अशक्य आहे आणि अधिक गतिशील दृष्टीकोन आवश्यक आहे: _डेटा स्रोत टेम्पलेट्स_. 
- -### मुख्य करारासाठी डेटा स्रोत - -प्रथम, तुम्ही मुख्य करारासाठी नियमित डेटा स्रोत परिभाषित करता. खाली दिलेला स्निपेट [Uniswap](https://uniswap.org) एक्सचेंज फॅक्टरी करारासाठी एक सरलीकृत उदाहरण डेटा स्रोत दर्शवितो. `NewExchange(address,address)` इव्हेंट हँडलर लक्षात ठेवा. जेव्हा कारखाना कराराद्वारे नवीन एक्सचेंज कॉन्ट्रॅक्ट ऑन-चेन तयार केला जातो तेव्हा हे उत्सर्जित होते. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -त्यानंतर, तुम्ही मॅनिफेस्टमध्ये _डेटा स्रोत टेम्पलेट्स_ जोडता. हे नियमित डेटा स्रोतांसारखेच आहेत, त्याशिवाय त्यांना `स्रोत` अंतर्गत पूर्व-परिभाषित करार पत्ता नाही. सामान्यत:, तुम्ही पालक कराराद्वारे व्यवस्थापित किंवा संदर्भित केलेल्या प्रत्येक प्रकारच्या उप-करारासाठी एक टेम्पलेट परिभाषित कराल. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... 
-templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### डेटा स्रोत टेम्पलेट इन्स्टंट करणे - -अंतिम चरणात, तुम्ही टेम्पलेटपैकी एकावरून डायनॅमिक डेटा स्रोत उदाहरण तयार करण्यासाठी तुमचे मुख्य कॉन्ट्रॅक्ट मॅपिंग अपडेट करता. या उदाहरणात, तुम्ही `Exchange` टेम्प्लेट इंपोर्ट करण्यासाठी मुख्य कॉन्ट्रॅक्ट मॅपिंग बदलू शकता आणि नवीन एक्सचेंज कॉन्ट्रॅक्ट इंडेक्सिंग सुरू करण्यासाठी त्यावर `Exchange.create(address)` पद्धत कॉल कराल. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **टीप:** नवीन डेटा स्रोत केवळ तो ज्या ब्लॉकमध्ये तयार केला गेला होता आणि पुढील सर्व ब्लॉकसाठी कॉल्स आणि इव्हेंटवर प्रक्रिया करेल, परंतु ऐतिहासिक डेटावर प्रक्रिया करणार नाही, म्हणजे, डेटावर प्रक्रिया करणार नाही. जे आधीच्या ब्लॉक्समध्ये समाविष्ट आहे. -> -> पूर्वीच्या ब्लॉक्समध्ये नवीन डेटा स्रोताशी संबंधित डेटा असल्यास, कराराची वर्तमान स्थिती वाचून आणि नवीन डेटा स्रोत तयार करताना त्या स्थितीचे प्रतिनिधित्व करणारी संस्था तयार करून तो डेटा अनुक्रमित करणे सर्वोत्तम आहे. - -### डेटा स्रोत संदर्भ - -डेटा स्रोत संदर्भ टेम्प्लेट इन्स्टंट करताना अतिरिक्त कॉन्फिगरेशन पास करण्याची परवानगी देतात. 
आमच्या उदाहरणात, समजा एक्सचेंजेस एका विशिष्ट ट्रेडिंग जोडीशी संबंधित आहेत, जे `NewExchange` इव्हेंटमध्ये समाविष्ट आहे. ती माहिती तात्काळ डेटा स्त्रोतामध्ये पास केली जाऊ शकते, जसे की: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## ब्लॉक सुरू करा - -`startBlock` ही एक पर्यायी सेटिंग आहे जी तुम्हाला साखळीतील कोणत्या ब्लॉकमधून डेटा स्रोत अनुक्रमणिका सुरू करेल हे परिभाषित करू देते. स्टार्ट ब्लॉक सेट केल्याने डेटा स्त्रोताला अप्रासंगिक असलेले लाखो ब्लॉक्स वगळण्याची परवानगी मिळते. सामान्यतः, सबग्राफ डेव्हलपर `startBlock` सेट करेल ज्या ब्लॉकमध्ये डेटा स्रोताचा स्मार्ट करार तयार केला गेला होता. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## हँडलर्सना कॉल करा - -इव्हेंट्स कराराच्या स्थितीत संबंधित बदल एकत्रित करण्याचा एक प्रभावी मार्ग प्रदान करतात, तर अनेक करार गॅसच्या किमती ऑप्टिमाइझ करण्यासाठी लॉग तयार करणे टाळतात. या प्रकरणांमध्ये, सबग्राफ डेटा स्त्रोत करारावर केलेल्या कॉलची सदस्यता घेऊ शकतो. फंक्शन सिग्नेचर आणि मॅपिंग हँडलरचा संदर्भ देणारे कॉल हँडलर परिभाषित करून हे साध्य केले जाते जे या फंक्शनवर कॉलवर प्रक्रिया करेल. या कॉल्सवर प्रक्रिया करण्यासाठी, मॅपिंग हँडलरला कॉलमधील टाइप केलेल्या इनपुट आणि आउटपुटसह युक्तिवाद म्हणून `ethereum.Call` प्राप्त होईल. व्यवहाराच्या कॉल साखळीतील कोणत्याही खोलीत केलेले कॉल मॅपिंगला ट्रिगर करतील, प्रॉक्सी कॉन्ट्रॅक्टद्वारे डेटा स्त्रोत करारासह क्रियाकलाप कॅप्चर करण्यास अनुमती देईल. - -कॉल हँडलर्स फक्त दोनपैकी एका प्रकरणात ट्रिगर होतील: जेव्हा निर्दिष्ट केलेल्या फंक्शनला कॉन्ट्रॅक्ट व्यतिरिक्त इतर खात्याद्वारे कॉल केले जाते किंवा जेव्हा ते सॉलिडिटीमध्ये बाह्य म्हणून चिन्हांकित केले जाते आणि त्याच कॉन्ट्रॅक्टमधील दुसर्‍या फंक्शनचा भाग म्हणून कॉल केले जाते. - -> **टीप:** कॉल हँडलर सध्या पॅरिटी ट्रेसिंग API वर अवलंबून आहेत. काही नेटवर्क, जसे की BNB चेन आणि आर्बिट्रम, या API चे समर्थन करत नाहीत. जर या नेटवर्कपैकी एका सबग्राफ इंडेक्सिंगमध्ये एक किंवा अधिक कॉल हँडलर असतील, तर ते समक्रमण सुरू होणार नाही. सबग्राफ विकसकांनी त्याऐवजी इव्हेंट हँडलर वापरावे. 
हे कॉल हँडलर्सपेक्षा कितीतरी अधिक कार्यक्षम आहेत, आणि प्रत्येक evm नेटवर्कवर समर्थित आहेत. - -### कॉल हँडलरची व्याख्या - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`फंक्शन` हे कॉल फिल्टर करण्यासाठी सामान्यीकृत फंक्शन स्वाक्षरी आहे. `हँडलर` गुणधर्म हे तुमच्या मॅपिंगमधील फंक्शनचे नाव आहे जे तुम्ही डेटा सोर्स कॉन्ट्रॅक्टमध्ये टार्गेट फंक्शन कॉल केल्यावर कार्यान्वित करू इच्छिता. - -### मॅपिंग कार्य - -प्रत्येक कॉल हँडलर एकच पॅरामीटर घेतो ज्याचा प्रकार कॉल फंक्शनच्या नावाशी संबंधित असतो. वरील उदाहरणातील सबग्राफमध्ये, मॅपिंगमध्ये `createGravatar` फंक्शन कॉल केल्यावर आणि वितर्क म्हणून `CreateGravatarCall` पॅरामीटर प्राप्त करण्यासाठी हँडलर असतो: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -`handleCreateGravatar` फंक्शन एक नवीन `CreateGravatarCall` घेते जो `@graphprotocol/graph-tsethereum.Call` चा उपवर्ग आहे. code>, ज्यामध्ये कॉलचे टाइप केलेले इनपुट आणि आउटपुट समाविष्ट आहेत. तुम्ही `graph codegen` चालवता तेव्हा तुमच्यासाठी `CreateGravatarCall` प्रकार जनरेट केला जातो. 
- -## ब्लॉक हँडलर - -कॉन्ट्रॅक्ट इव्हेंट्स किंवा फंक्शन कॉल्सची सदस्यता घेण्याव्यतिरिक्त, सबग्राफला त्याचा डेटा अद्यतनित करायचा असेल कारण साखळीमध्ये नवीन ब्लॉक्स जोडले जातात. हे साध्य करण्यासाठी सबग्राफ प्रत्येक ब्लॉकनंतर किंवा पूर्व-परिभाषित फिल्टरशी जुळणार्‍या ब्लॉक्सनंतर फंक्शन चालवू शकतो. - -### समर्थित फिल्टर - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -ब्लॉक हँडलरसाठी फिल्टरची अनुपस्थिती हे सुनिश्चित करेल की हँडलरला प्रत्येक ब्लॉक म्हटले जाईल. डेटा स्त्रोतामध्ये प्रत्येक फिल्टर प्रकारासाठी फक्त एक ब्लॉक हँडलर असू शकतो. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### मॅपिंग कार्य - -मॅपिंग फंक्शनला त्याचा एकमेव युक्तिवाद म्हणून `ethereum.Block` प्राप्त होईल. इव्हेंटसाठी मॅपिंग फंक्शन्सप्रमाणे, हे फंक्शन स्टोअरमधील विद्यमान सबग्राफ घटकांमध्ये प्रवेश करू शकते, स्मार्ट कॉन्ट्रॅक्ट कॉल करू शकते आणि संस्था तयार किंवा अद्यतनित करू शकते. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## अनामिक घटना - -तुम्हाला सॉलिडिटीमध्ये निनावी इव्हेंटवर प्रक्रिया करायची असल्यास, ते इव्हेंटचा विषय 0 प्रदान करून प्राप्त केले जाऊ शकते, उदाहरणार्थ: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -जेव्हा स्वाक्षरी आणि विषय 0 दोन्ही जुळतात तेव्हाच इव्हेंट ट्रिगर केला जाईल. डीफॉल्टनुसार, `topic0` इव्हेंट स्वाक्षरीच्या हॅशच्या समान आहे. - -## Transaction Receipts in Event Handlers - -`specVersion` `0.0.5` आणि `apiVersion` `0.0.7` पासून प्रारंभ करून, इव्हेंट हँडलर्सना पावतीवर प्रवेश असू शकतो व्यवहार ज्याने त्यांना उत्सर्जित केले. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -हँडलर फंक्शनच्या आत, पावती `Event.receipt` फील्डमध्ये ऍक्सेस केली जाऊ शकते. जेव्हा `पावती` की `false` वर सेट केली जाते किंवा मॅनिफेस्टमध्ये वगळली जाते, तेव्हा त्याऐवजी `null` मूल्य परत केले जाईल. - -## प्रायोगिक वैशिष्ट्ये - -`specVersion` `0.0.4` पासून सुरू करून, सबग्राफ वैशिष्ट्ये मॅनिफेस्ट फाइलच्या शीर्ष स्तरावरील `वैशिष्ट्ये` विभागात स्पष्टपणे घोषित करणे आवश्यक आहे, त्यांचा वापर करून `camelCase` नाव, खालील तक्त्यामध्ये सूचीबद्ध केल्याप्रमाणे: - -| वैशिष्ट्य | नाव | -| --------------------------------------------------- | ---------------- | -| [गैर-घातक त्रुटी](#non-fatal-errors) | `nonFatalErrors` | -| [पूर्ण मजकूर शोध](#defining-fulltext-search-fields) | `fullTextSearch` | -| [कलम करणे](#grafting-onto-existing-subgraphs) | `कलम करणे` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -लक्षात ठेवा की वैशिष्ट्य घोषित न करता वापरल्याने सबग्राफ डिप्लॉयमेंट दरम्यान **प्रमाणीकरण त्रुटी** येईल, परंतु वैशिष्ट्य घोषित केले असल्यास परंतु वापरलेले नसल्यास कोणतीही त्रुटी येणार नाही. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### गैर-घातक त्रुटी - -आधीच समक्रमित केलेल्या सबग्राफ्सवर अनुक्रमणिका त्रुटी, डीफॉल्टनुसार, सबग्राफ अयशस्वी होण्यास आणि समक्रमण थांबवण्यास कारणीभूत ठरतील. सबग्राफ वैकल्पिकरित्या त्रुटींच्या उपस्थितीत समक्रमण सुरू ठेवण्यासाठी कॉन्फिगर केले जाऊ शकतात, हँडलरने केलेल्या बदलांकडे दुर्लक्ष करून, ज्यामुळे त्रुटी उद्भवली. हे सबग्राफ लेखकांना त्यांचे सबग्राफ दुरुस्त करण्यासाठी वेळ देते जेव्हा की नवीनतम ब्लॉकच्या विरूद्ध क्वेरी चालू ठेवल्या जातात, जरी त्रुटीमुळे परिणाम विसंगत असू शकतात. लक्षात घ्या की काही त्रुटी अजूनही नेहमीच घातक असतात. गैर-घातक होण्यासाठी, त्रुटी निश्चितपणे ज्ञात असणे आवश्यक आहे. - -> **टीप:** ग्राफ नेटवर्क अद्याप घातक नसलेल्या त्रुटींना समर्थन देत नाही आणि विकासकांनी स्टुडिओद्वारे नेटवर्कवर ती कार्यक्षमता वापरून सबग्राफ उपयोजित करू नये. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -क्वेरीने `subgraphError` युक्तिवादाद्वारे संभाव्य विसंगती असलेल्या डेटाची क्वेरी करणे देखील निवडले पाहिजे. उदाहरणाप्रमाणे सबग्राफ एरर वगळला आहे की नाही हे तपासण्यासाठी `_meta` क्वेरी करण्याची देखील शिफारस केली जाते: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -सबग्राफमध्ये त्रुटी आढळल्यास, ती क्वेरी या उदाहरणाच्या प्रतिसादाप्रमाणे `"indexing_error"` संदेशासह डेटा आणि graphql त्रुटी दोन्ही देईल: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### विद्यमान सबग्राफवर कलम करणे - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -जेव्हा सबग्राफ प्रथम उपयोजित केला जातो, तेव्हा तो संबंधित साखळीच्या उत्पत्ती ब्लॉकवर (किंवा प्रत्येक डेटा स्त्रोतासह परिभाषित केलेल्या `startBlock` वर) काही परिस्थितींमध्ये इव्हेंट्सचे अनुक्रमणिका सुरू करतो; विद्यमान सबग्राफमधील डेटा पुन्हा वापरणे आणि नंतरच्या ब्लॉकमध्ये अनुक्रमणिका सुरू करणे फायदेशीर आहे. अनुक्रमणिकेच्या या मोडला _ग्राफ्टिंग_ म्हणतात. उदाहरणार्थ, मॅपिंगमध्ये भूतकाळातील साध्या चुका लवकर मिळवण्यासाठी किंवा विद्यमान सबग्राफ अयशस्वी झाल्यानंतर तात्पुरते काम करण्यासाठी विकासादरम्यान ग्राफ्टिंग उपयुक्त आहे. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -जेव्हा एखादा सबग्राफ ज्याच्या मॅनिफेस्टमध्ये `ग्राफ्ट` ब्लॉक असतो तो उपयोजित केला जातो, तेव्हा आलेख नोड `बेस` सबग्राफचा डेटा कॉपी करेल आणि दिलेल्या `ब्लॉक` सह आणि नंतर त्या ब्लॉकमधून नवीन सबग्राफ अनुक्रमित करणे सुरू ठेवा. बेस सबग्राफ लक्ष्य ग्राफ नोडच्या उदाहरणावर अस्तित्वात असणे आवश्यक आहे आणि कमीतकमी दिलेल्या ब्लॉकपर्यंत अनुक्रमित केलेले असणे आवश्यक आहे. या निर्बंधामुळे, ग्राफ्टिंगचा वापर केवळ विकासादरम्यान किंवा आणीबाणीच्या काळात समतुल्य नॉन-ग्राफ्टेड सबग्राफ तयार करण्यासाठी वेगवान करण्यासाठी केला पाहिजे. - -बेस डेटा इंडेक्स करण्याऐवजी कॉपीचे ग्राफ्टिंग केल्यामुळे, सुरवातीपासून इंडेक्स करण्यापेक्षा इच्छित ब्लॉकमध्ये सबग्राफ मिळवणे खूप जलद आहे, जरी सुरुवातीच्या डेटा कॉपीला खूप मोठ्या सबग्राफसाठी बरेच तास लागू शकतात. ग्रॅफ्टेड सबग्राफ सुरू होत असताना, ग्राफ नोड आधीपासून कॉपी केलेल्या घटक प्रकारांबद्दल माहिती लॉग करेल. - -ग्राफ्टेड सबग्राफ GraphQL स्कीमा वापरू शकतो जो बेस सबग्राफपैकी एकाशी एकसारखा नसतो, परंतु त्याच्याशी फक्त सुसंगत असतो. 
ती स्वतःच्या अधिकारात वैध सबग्राफ स्कीमा असणे आवश्यक आहे, परंतु खालील प्रकारे बेस सबग्राफच्या स्कीमापासून विचलित होऊ शकते: - -- हे घटक प्रकार जोडते किंवा काढून टाकते -- हे घटक प्रकारातील गुणधर्म काढून टाकते -- हे अस्तित्व प्रकारांमध्ये रद्द करण्यायोग्य विशेषता जोडते -- हे नॉन-नलेबल अॅट्रिब्यूट्सना न्युलेबल अॅट्रिब्यूटमध्ये बदलते -- हे enums मध्ये मूल्ये जोडते -- हे इंटरफेस जोडते किंवा काढून टाकते -- कोणत्या घटकासाठी इंटरफेस लागू केला जातो ते बदलते - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> हे ऑफ-चेन डेटाच्या निर्धारवादी अनुक्रमणिकेसाठी तसेच अनियंत्रित HTTP-स्रोत डेटाच्या संभाव्य परिचयासाठी देखील पाया घालते. - -### सविश्लेषण - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> हे विद्यमान `ipfs.cat` API बदलते - -### Upgrade guide - -#### `graph-ts` आणि `graph-cli` अपडेट करा - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -मूळ एकत्रित अस्तित्व: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! 
- externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -नवीन, विभाजित अस्तित्व: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -जर मूळ घटक आणि परिणामी फाइल डेटा स्रोत घटक यांच्यातील संबंध 1:1 असेल तर, आयपीएफएस सीआयडी लुकअप म्हणून वापरून मूळ घटकाला परिणामी फाइल घटकाशी जोडणे हा सर्वात सोपा नमुना आहे. तुम्हाला तुमच्या नवीन फाइल-आधारित घटकांचे मॉडेलिंग करण्यात अडचण येत असल्यास Discord वर संपर्क साधा! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### फाइल्सवर प्रक्रिया करण्यासाठी नवीन हँडलर तयार करा - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -उदाहरण हँडलर: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### आवश्यकतेनुसार फाईल डेटा स्रोत तयार करा - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -उदाहरण: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### तुमचे सबग्राफ उपयोजित करत आहे - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -फाइल डेटा स्रोत हँडलर आणि संस्था इतर सबग्राफ संस्थांपासून वेगळ्या केल्या जातात, ते कार्यान्वित केल्यावर ते निर्धारवादी आहेत याची खात्री करून आणि साखळी-आधारित डेटा स्रोतांचे दूषित होणार नाही याची खात्री करतात. विशिष्ट असणे: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> बहुतेक वापर-प्रकरणांसाठी ही मर्यादा समस्याप्रधान नसावी, परंतु काहींसाठी ते जटिलता आणू शकते. सबग्राफमध्‍ये तुमच्‍या फाईल-आधारित डेटाचे मॉडेल बनवण्‍यात तुम्‍हाला समस्या येत असल्‍यास कृपया डिस्‍कॉर्ड द्वारे संपर्क साधा! - -याव्यतिरिक्त, फाइल डेटा स्रोतावरून डेटा स्रोत तयार करणे शक्य नाही, मग ते ऑनचेन डेटा स्रोत असो किंवा अन्य फाइल डेटा स्रोत. भविष्यात हे निर्बंध उठवले जाऊ शकतात. - -#### चांगला सराव - -तुम्ही एनएफटी मेटाडेटाला संबंधित टोकनशी लिंक करत असल्यास, टोकन एंटिटीमधील मेटाडेटा घटकाचा संदर्भ देण्यासाठी मेटाडेटाचा IPFS हॅश वापरा. आयडी म्हणून IPFS हॅश वापरून मेटाडेटा घटक जतन करा. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -तुमच्याकडे अनेक वेळा रीफ्रेश केलेल्या संस्था असल्यास, IPFS हॅश वापरून अद्वितीय फाइल-आधारित संस्था तयार करा & एंटिटी आयडी आणि साखळी-आधारित घटकामध्ये व्युत्पन्न फील्ड वापरून त्यांचा संदर्भ द्या. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### ओळखलेले समस्या - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### उदाहरणे - -[क्रिप्टो कोव्हन सबग्राफ स्थलांतर](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### संदर्भ - -[GIP फाइल डेटा स्रोत](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/mr/developing/creating-a-subgraph/_meta.js b/website/pages/mr/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/mr/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/mr/developing/graph-ts/_meta.js b/website/pages/mr/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/mr/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/mr/managing/deprecate-a-subgraph.mdx b/website/pages/mr/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/mr/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/mr/mips-faqs.mdx b/website/pages/mr/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/mr/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/mr/querying/_meta.js b/website/pages/mr/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/mr/querying/_meta.js +++ b/website/pages/mr/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/mr/querying/graph-client/_meta.js b/website/pages/mr/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/mr/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/nl/_meta.js b/website/pages/nl/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/nl/_meta.js +++ b/website/pages/nl/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/nl/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/nl/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- 
a/website/pages/nl/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. 
_This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. 
- -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... 
-} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. - -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. 
To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/nl/deploying/hosted-service.mdx b/website/pages/nl/deploying/hosted-service.mdx deleted file mode 100644 index 3264d39b8c3c..000000000000 --- a/website/pages/nl/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. 
Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Creëer een Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. 
The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/nl/deploying/subgraph-studio.mdx b/website/pages/nl/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/nl/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. 
- -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. 
Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/nl/developing/creating-a-subgraph.mdx b/website/pages/nl/developing/creating-a-subgraph.mdx deleted file mode 100644 index e38d897919f8..000000000000 --- a/website/pages/nl/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. - -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. 
`mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. 
- -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
-
-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed.
-
-A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level:
-
-```yaml
-description: ...
-graft:
-  base: Qm... # Subgraph ID of base subgraph
-  block: 7345624 # Block number
-```
-
-When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph.
-
-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
-
-The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it.
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
-
-The CID of the file as a readable string can be accessed via the `dataSource` as follows:
-
-```typescript
-const cid = dataSource.stringParam()
-```
-
-Example handler:
-
-```typescript
-import { json, Bytes, dataSource } from '@graphprotocol/graph-ts'
-import { TokenMetadata } from '../generated/schema'
-
-export function handleMetadata(content: Bytes): void {
-  let tokenMetadata = new TokenMetadata(dataSource.stringParam())
-  const value = json.fromBytes(content).toObject()
-  if (value) {
-    const image = value.get('image')
-    const name = value.get('name')
-    const description = value.get('description')
-    const externalURL = value.get('external_url')
-
-    if (name && image && description && externalURL) {
-      tokenMetadata.name = name.toString()
-      tokenMetadata.image = image.toString()
-      tokenMetadata.externalURL = externalURL.toString()
-      tokenMetadata.description = description.toString()
-    }
-
-    tokenMetadata.save()
-  }
-}
-```
-
-#### Spawn file data sources when required
-
-You can now create file data sources during execution of chain-based handlers:
-
-- Import the template from the auto-generated `templates`
-- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave
-
-For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`).
-
-For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)).
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/nl/developing/creating-a-subgraph/_meta.js b/website/pages/nl/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/nl/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/nl/developing/graph-ts/_meta.js b/website/pages/nl/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/nl/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/nl/managing/deprecate-a-subgraph.mdx b/website/pages/nl/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/nl/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/nl/mips-faqs.mdx b/website/pages/nl/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/nl/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/nl/querying/_meta.js b/website/pages/nl/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/nl/querying/_meta.js +++ b/website/pages/nl/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/nl/querying/graph-client/_meta.js b/website/pages/nl/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/nl/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pl/_meta.js b/website/pages/pl/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/pl/_meta.js +++ b/website/pages/pl/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/pl/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/pl/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- 
a/website/pages/pl/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. 
_This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. 
- -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... 
-} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. - -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. 
To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/pl/deploying/hosted-service.mdx b/website/pages/pl/deploying/hosted-service.mdx deleted file mode 100644 index 627b265a17f8..000000000000 --- a/website/pages/pl/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. 
Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Jak stworzyć subgraf - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. 
The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/pl/deploying/subgraph-studio.mdx b/website/pages/pl/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/pl/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. 
- -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. 
Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/pl/developing/creating-a-subgraph.mdx b/website/pages/pl/developing/creating-a-subgraph.mdx deleted file mode 100644 index e38d897919f8..000000000000 --- a/website/pages/pl/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. - -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. 
`mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. 
- -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query than those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id)`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers.
These are far more performant than call handlers, and are supported on every evm network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals.
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it.
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- Call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)).
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/pl/developing/creating-a-subgraph/_meta.js b/website/pages/pl/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/pl/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pl/developing/graph-ts/_meta.js b/website/pages/pl/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/pl/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pl/managing/deprecate-a-subgraph.mdx b/website/pages/pl/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/pl/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/pl/mips-faqs.mdx b/website/pages/pl/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/pl/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs.
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/pl/querying/_meta.js b/website/pages/pl/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/pl/querying/_meta.js +++ b/website/pages/pl/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/pl/querying/graph-client/_meta.js b/website/pages/pl/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/pl/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pt/_meta.js b/website/pages/pt/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/pt/_meta.js +++ b/website/pages/pt/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/pt/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/pt/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 3a46de123ce4..000000000000 --- 
a/website/pages/pt/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Como Lançar um Subgraph no Serviço Hospedado ---- - -> A disponibilidade dos endpoints do serviço hospedado foi encerrada em 12 de junho de 2024. [Saiba mais](/sunrise). - -Esta página explica como lançar um subgraph no Serviço Hospedado. Para lançar um subgraph, instale a [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). Se ainda não criou um subgraph, veja a secção [Como Criar um Subgraph](/developing/creating-a-subgraph). - -## Como criar uma conta no Serviço Hospedado - -Antes de usar o Serviço Hospedado, crie uma conta nele. Para este fim, faça uma conta no [Github](https://github.com/); se não tiver, crie uma primeiro, e depois, navegue ao [Serviço Hospedado](https://thegraph.com/hosted-service/), clique no botão _'Sign up with Github'_ (cadastrar com Github) e complete o fluxo de autorização do Github. - -## Como Armazenar o Token de Acesso - -Após criar uma conta, navegue até o seu [painel de controle](https://thegraph.com/hosted-service/dashboard). Copie o token de acesso (access token) exibido no painel e execute `graph auth --product hosted-service ` para armazenar o token de acesso no seu computador. Isto só precisa ser feito uma vez, a não ser que tenha que gerar o token de acesso novamente. - -## Como Criar um Subgraph no Serviço Hospedado - -Antes de lançar o subgraph, ele tem que ser criado no The Graph Explorer. Vá até o [painel de controle](https://thegraph.com/hosted-service/dashboard), clique no botão _'Add Subgraph'_ (Adicionar Subgraph) e preencha os campos abaixo com as informações adequadas: - -**Image** (Imagem) — Selecione uma imagem para ser usada como foto de prévia e thumbnail (miniatura) para o subgraph. 
- -**Subgraph Name** (Nome do Subgraph) — Defina o nome em `account-name/subgraph-name` a ser usado para lançamentos e pontos finais do GraphQL, além de nomear a conta sob a qual será criada o subgraph. _Este campo não pode ser alterado depois de pronto._ - -**Account** (Conta) — A conta sob a qual foi criado o subgraph. Pode ser uma conta de indivíduo ou organização. _Depois disto, não é possível movimentar subgraphs entre contas._ - -**Subtitle** (Subtítulo) — Texto que aparecerá em cards de subgraphs. - -**Description** (Descrição) — Descrição do subgraph, visível na página de detalhes do mesmo. - -**GitHub URL** — Atalho ao repositório do subgraph no GitHub. - -**Hide** (Esconder) — Opção alternável que oculta o subgraph no Graph Explorer. - -Após salvar o novo subgraph, aparecerão instruções sobre como instalar o Graph CLI; como gerar estruturas para um novo subgraph; e como lançar o seu subgraph. Os primeiros dois passos foram abordados na seção [Como Definir um Subgraph](/developing/creating-a-subgraph/). - -## Como Lançar um Subgraph no Serviço Hospedado - -O lançamento do seu subgraph enviará os arquivos do subgraph construído com `yarn build` ao IPFS, e mandará que o Graph Explorer comece a indexar o seu subgraph com estes arquivos. - -Lance o subgraph com o comando `yarn deploy` - -Após lançar o subgraph, o Graph Explorer mostrará o estado da sincronização deste. Dependendo da quantia de dados e eventos que devem ser extraídos de blocos históricos, a começar do bloco-génese, a sincronização pode levar de alguns minutos a várias horas. - -O estado do subgraph é mostrado como `Synced` (sincronizado) quando o Graph Node extrai todos os dados de blocos históricos. O Graph Node continuará a inspecionar blocos para o seu subgraph enquanto estes blocos são minerados. 
- -## Como Relançar um Subgraph - -Ao fazer mudanças à definição do seu subgraph — por exemplo, para resolver um problema nos mapeamentos de entidade — execute o comando `yarn deploy` novamente para lançar a versão atualizada do seu subgraph. Para atualizar um subgraph, o Graph Node sempre deve reindexá-lo por inteiro, novamente começando pelo bloco-gênese. - -Se seu subgraph já lançado ainda estiver no estado `Syncing` (Sincronizando), ele será imediatamente substituído com a versão recém-lançada. Se o mesmo subgraph já estiver totalmente sincronizado, o Graph Node marcará a versão recém-lançada como a `Pending Version` (Versão Pendente), sincronizá-la no fundo, e substituir a versão atual com a nova apenas quando terminar a sincronização da versão nova. Isto garante que você tenha um subgraph para trabalhos enquanto a nova versão sincroniza. - -## Como lançar o subgraph a várias redes - -Em alguns casos, irá querer lançar o mesmo subgraph a várias redes sem duplicar o seu código completo. O grande desafio nisto é que os endereços de contrato nestas redes são diferentes. - -### Como usar o graph-cli - -Tanto o `graph build` (desde a `v0.29.0`) quanto o `graph deploy` (desde a `v0.32.0`) aceitam duas novas opções: - -```sh -Options: - - ... - --network Configuração de rede para usar no arquivo de config de redes - --network-file Local do arquivo de config de redes (padrão: "./networks.json") -``` - -A opção `--network` serve para especificar uma configuração de rede a partir de um arquivo `json` (o comum é `networks.json`), para facilmente atualizar o seu subgraph durante a programação. - -**Nota:** O comando `init` agora irá gerar um `networks.json` automaticamente, com base na informação fornecida. Daí, será possível atualizar redes existentes ou adicionar redes novas. 
- -Caso não tenha um arquivo `networks.json`, precisará criar o mesmo manualmente, com a seguinte estrutura: - -```json -{ - "network1": { // nome da rede - "dataSource1": { // nome do dataSource - "address": "0xabc...", // endereço do contrato (opcional) - "startBlock": 123456 // bloco inicial (opcional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Nota:** Não precisa especificar quaisquer dos `templates` (se tiver) no arquivo de configuração, apenas as `dataSources`. Se houver alguns `templates` declarados no arquivo `subgraph.yaml`, sua rede será automaticamente atualizada à especificada na opção `--network`. - -Agora, vamos supor que quer lançar o seu subgraph às redes `mainnet` e `sepolia`, e este é o seu `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -O seu arquivo de config de redes deve ficar assim: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Agora podemos executar um dos seguintes comandos: - -```sh -# Usar o arquivo networks.json padrão -yarn build --network sepolia - -# Usar arquivo com nome personalizado -yarn build --network sepolia --network-file local/do/config -``` - -O comando `build` atualizará o seu `subgraph.yaml` com a configuração `sepolia` e depois recompilará o subgraph. O seu arquivo `subgraph.yaml` agora deve parecer com isto: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Agora está tudo pronto para executar o `yarn deploy`. 
- -**Nota:** Como já levantado, desde o `graph-cli 0.32.0`, dá para executar diretamente o `yarn deploy` com a opção `--network`: - -```sh -# Usar o arquivo networks.json padrão -yarn deploy --network sepolia - -# Usar arquivo com nome personalizado -yarn deploy --network sepolia --network-file local/do/config -``` - -### Como usar o template subgraph.yaml - -Uma solução para versões mais antigas do graph-cli, que permite a parametrização de aspetos como endereços de contratos, é gerar partes dele com um sistema de templating como o [Mustache](https://mustache.github.io/) ou o [Handlebars](https://handlebarsjs.com/). - -Por exemplo, vamos supor que um subgraph deve ser lançado à mainnet e à Sepolia, através de diferentes endereços de contratos. Então, seria possível definir dois arquivos de config ao fornecer os endereços para cada rede: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -e - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Além disso, substituiria o nome da rede e os endereços no manifest com variáveis temporários `{{network}}` and `{{address}}` e renomearia o manifest a, por exemplo, `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Para poder gerar um manifest para uma rede, pode adicionar mais dois comandos ao `package.json` com uma dependência no `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -Para lançar este subgraph à mainnet ou à Sepolia, apenas um dos seguintes comandos precisaria ser executado: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Veja um exemplo funcional disto [aqui](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Nota:** Este método também pode ser aplicado a situações mais complexas, onde é necessário substituir mais que endereços de contratos e nomes de redes, ou gerar mapeamentos e ABIs de templates também. - -## Como conferir a saúde do subgraph - -Se um subgraph for sincronizado com sucesso, isto indica que ele continuará a rodar bem para sempre. Porém, novos gatilhos na rede podem revelar uma condição de erro não testada, ou ele pode começar a se atrasar por problemas de desempenho ou com os operadores de nodes. - -O Graph Node expõe um endpoint do GraphQL que pode ser consultado em query, para conferir o status do seu subgraph. No Serviço Hospedado, ele está disponível no `https://api.thegraph.com/index-node/graphql`; em um node local, no port `8030/graphql`. Encontre o schema completo para este endpoint [aqui](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Veja um exemplo de query sobre o estado da versão atual de um subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Isto rendará o `chainHeadBlock`, que pode ser comparado com o `latestBlock` no seu subgraph para conferir se está atrasado. `synced` informa se o subgraph conseguiu alcançar a chain. `health` pode atualmente resgatar os valores de `healthy`, se não houver erros; ou `failed`, se algum erro tiver impedido o progresso do subgraph. 
Neste caso, verifique o campo `fatalError` para mais detalhes. - -## Política de arquivamento de subgraphs no serviço hospedado - -O Serviço Hospedado é um Indexador gratuito de Graph Nodes. Os programadores podem lançar subgraphs e indexar uma gama de redes, que serão indexadas e disponibilizadas para consulta via graphQL. - -Para melhorar o desempenho do serviço para subgraphs ativos, o Serviço Hospedado arquivará subgraphs inativos. - -**Um subgraph é definido como "inativo" se tiver sido lançado ao Serviço Hospedado há mais de 45 dias e tiver recebido 0 queries nos últimos 45 dias.** - -Os desenvolvedores serão avisados por email se um dos seus subgraphs for marcado como inativo, e será removido após 7 dias. Caso queiram "ativar" o seu subgraph, podem fazê-lo com um query no playground graphQL, no Serviço Hospedado do seu subgraph. Os programadores sempre podem relançar um subgraph arquivado caso o necessitem novamente. - -## Política de arqivamento do Subgraph Studio - -Uma versão de subgraph no Studio é arquivada se, e apenas se, atender aos seguintes critérios: - -- A versão não foi publicada na rede (ou tem a publicação pendente) -- A versão foi criada há 45 dias ou mais -- O subgraph não foi consultado em 30 dias - -Além disto, quando uma nova versão é editada, se o subgraph ainda não foi publicado, então a versão N-2 do subgraph é arquivada. - -Todos os subgraphs afetados por esta política têm a opção de trazer de volta a versão em questão. 
diff --git a/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 0712d6ce5264..000000000000 --- a/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Como lançar um Subgraph ao Subgraph Studio ---- - -Estes são os passos para lançar o seu subgraph no Subgraph Studio: - -- Instale o The Graph CLI (com yarn ou npm) -- Como criar o seu subgraph no Subgraph Studio -- Autentique a sua conta da CLI -- Como lançar um Subgraph ao Subgraph Studio - -## Como instalar o Graph CLI - -Há uma CLI para lançar subgraphs ao [Subgraph Studio](https://thegraph.com/studio/). Aqui estão os comandos para instalar a `graph-cli`. Isto pode ser feito com npm ou yarn. - -**Instalação com o yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalação com o npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Como criar o seu subgraph no Subgraph Studio - -Antes de lançar o seu subgraph, precisa criar um subgraph no [Subgraph Studio](https://thegraph.com/studio/). Aprenda mais sobre isto na nossa [documentação do Studio](/deploying/subgraph-studio). - -## Como inicializar o seu Subgraph - -Quando o seu subgraph for criado no Subgraph Studio, inicialize o código do subgraph com o seguinte comando: - -```bash -graph init --studio -``` - -O valor `` pode ser encontrado na sua página de detalhes do subgraph no Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -Após executar o `graph init`, insira o endereço do contrato, a rede e a ABI que quer consultar. Fazer isto gerará uma nova pasta em sua máquina local, com um código básico para começar os trabalhos no seu subgraph. Logo, pode finalizar o seu subgraph para garantir que ele funciona como esperado. - -## Autenticação - -Antes de poder lançar o seu subgraph ao Subgraph Studio, entre na sua conta dentro da CLI. 
Para fazer isto, precisa da sua deploy key (chave de lançamento), que está na página "My Subgraphs" (Meus Subgraphs), ou na página de detalhes do seu subgraph. - -Aqui está o comando que deve usar para se autenticar a partir da CLI: - -```bash -graph auth --studio -``` - -## Como lançar um Subgraph ao Subgraph Studio - -Quando estiver pronto, já pode lançar o seu subgraph ao Subgraph Studio. Fazer isto não editará o seu subgraph na rede descentralizada; apenas para a sua conta no Studio, onde poderá testá-lo e atualizar os metadados. - -Aqui está o comando CLI que deverá usar para lançar o seu subgraph. - -```bash -graph deploy --studio -``` - -Após executar este comando, a CLI pedirá por um número de versão. Podes nomeá-lo como quiser, com rótulos como `0.1` e `0.2`; ou com letras incluídas, como `uniswap-v2.0.1`. Estes rótulos serão visíveis no Graph Explorer e podem ser usados por curadores para decidir se querem ou não sinalizar esta versão, então escolha bem. - -Após o lançamento, pode testar o seu subgraph no Subgraph Studio com o playground; lançar outra versão caso necessário; atualizar os metadados; e quando estiver pronto, editar o seu subgraph no Graph Explorer. diff --git a/website/pages/pt/deploying/hosted-service.mdx b/website/pages/pt/deploying/hosted-service.mdx deleted file mode 100644 index fbbb24e13564..000000000000 --- a/website/pages/pt/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: O que é o Serviço Hospedado? ---- - -> Nota: endpoints do serviço hospedado foram encerrados em 12 de junho de 2024, já que todos os subgraphs precisam se atualizar à Graph Network. Leia mais nas [Perguntas Frequentes Sobre o Nascer do Sol](/sunrise) - -Esta seção guiará-lhe pelo processo de lançar um subgraph ao [Serviço Hospedado](https://thegraph.com/hosted-service/). - -Se não tem uma conta no Serviço Hospedado, faça um cadastro com a sua conta do GitHub. 
Após se autenticar, pode começar a criar subgraphs através da UI e lançá-los do seu terminal. O Serviço Hospedado apoia uma boa quantidade de redes, como Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum e mais. - -Para uma lista compreensiva, veja [Redes Apoiadas](/developing/supported-networks/#hosted-service). - -## Como Criar um Subgraph - -Primeiro, siga as instruções [aqui](/developing/creating-a-subgraph/#install-the-graph-cli) para instalar o Graph CLI. Crie um subgraph com um passe de `graph init --product hosted-service` - -### De um Contrato Existente - -Se já tem um contrato inteligente lançado na sua rede de escolha, iniciar um novo subgraph a partir deste contrato já é um bom começo para usar o serviço hospedado. - -Poderá usar este comando para criar um subgraph que indexa todos os eventos de um contrato existente. Isto tentará retirar o ABI do contrato do explorador de blocos. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Além disso, também dá para usar os seguintes argumentos opcionais. Se a ABI não puder ser retirada do explorador de blocos, ela pede por um local de arquivo. Se quaisquer argumentos opcionais estiveram a faltar no comando, ele levanta um formulário interativo. - -```sh ---network \ ---abi \ -``` - -O ``, neste caso, é o seu nome de utilizador ou organização do GitHub; `` é o nome para o seu subgraph; e `` é o nome opcional do diretório onde o `graph init` colocará o exemplo de manifest do subgraph.`` é o endereço do seu contrato existente;`` é o nome da rede na qual o contrato vive; `` é um caminho local a um arquivo ABI de contrato. 
**Tanto `--network` quanto `--abi` são opcionais.** - -### De um Exemplo de Subgraph - -O segundo modo que o `graph init` apoia é criar um projeto novo a partir de um subgraph exemplo através do seguinte comando: - -``` -graph init --from-example --product hosted-service / [] -``` - -O subgraph de exemplo é baseado no contrato Gravity por Dani Grant, que gesta avatares de usuários e emite eventos `NewGravatar` ou `UpdateGravatar` sempre que são criados ou atualizados avatares. O subgraph lida com estes eventos ao escrever entidades `Gravatar` ao armazenamento do Graph Node e garantir que estes são atualizados de acordo com os eventos. Continue até o [manifest do subgraph](/developing/creating-a-subgraph#the-subgraph-manifest) para entender melhor ao que você deve prestar atenção, como eventos dos seus contratos inteligentes, mapeamentos, e mais. - -### De um Contrato de Proxy - -Para construir um subgraph personalizado para o propósito de monitorar um contrato de Proxy, inicialize o subgraph com a especificação do endereço do contrato de implementação. Quando o processo de inicialização for concluído, o último passo envolverá a atualização do nome da rede no arquivo subgraph.yaml ao endereço do contrato de Proxy. Você pode usar o comando abaixo. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Redes apoiadas no Serviço Hospedado - -Veja a lista das redes apoiadas [aqui](/developing/supported-networks). diff --git a/website/pages/pt/deploying/subgraph-studio.mdx b/website/pages/pt/deploying/subgraph-studio.mdx deleted file mode 100644 index 0afa4fe59623..000000000000 --- a/website/pages/pt/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Como Usar o Subgraph Studio ---- - -Bem-vindo ao seu novo ponto de partida 👩🏽‍🚀 - -O Subgraph Studio é o melhor lugar para construir e criar subgraphs, adicionar metadados, e editá-los no novo Explorer descentralizado (mais sobre isto [aqui](/network/explorer)). 
- -O que é possível no Subgraph Studio: - -- Criar um subgraph através da UI (interface de utilizador) do Studio -- Lançar um subgraph com a CLI -- Editar um subgraph com a UI do Studio -- Testá-lo no playground -- Integrá-lo na encenação com a URL de query -- Criar e gerir as suas chaves de API para subgraphs específicos - -No Subgraph Studio, tens controle total sobre os seus subgraphs. Pode não só testar os seus subgraphs antes de publicá-los, mas também restringir as suas chaves de API a domínios específicos e permitir que apenas certos indexadores possam consultar das suas chaves de API. - -Consultar subgraphs gera taxas de consulta, usadas para recompensar [Indexers](/network/indexing) na rede Graph. Se és um desenvolvedor de dApps ou de subgraphs, o Studio dará-lhe o poder para construir subgraphs melhores e, com eles, mover as suas consultas ou as da sua comunidade. O Studio é composto de 5 partes principais: - -- Os seus controles de conta de utilizador -- Uma lista de subgraphs que criaste -- Uma secção para gerir, visualizar detalhes e conferir o estado de um subgraph específico -- Uma secção para gerir as chaves de API necessárias para queries em um subgraph -- Uma secção para gerir as suas cobranças - -## Como Criar a Sua Conta - -1. Cadastre-se com a sua carteira - via MetaMask, WalletConnect, Coinbase Wallet ou Safe. -1. Quando entrar, verás a sua chave de lançamento (deploy key) única na página principal da sua conta. Isto permitirá-lhe editar os seus subgraphs ou gerir as suas chaves de API e cobranças. Terá uma chave de lançamento única que pode ser gerada novamente, caso suspeite que ela foi comprometida. 
- -## Como Criar um Subgraph no Subgraph Studio - - - -## Compatibilidade de Subgraph com a Graph Network - -Para ter apoio de Indexadores na Graph Network, os subgraphs devem: - -- Indexar uma [rede apoiada](/developing/supported-networks) -- Não deve usar quaisquer das seguintes características: - - ipfs.cat & ipfs.map - - Erros não-fatais - - Enxertos - -Mais características & redes serão adicionadas, pouco a pouco, à Graph Network. - -### Fluxo do Ciclo de Vida de um Subgraph - -![Ciclo de Vida de um Subgraph](/img/subgraph-lifecycle.png) - -Após ter criado o seu subgraph, poderás lançá-lo com a [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) (interface de linha de comando). Lançar um subgraph com a CLI envia o subgraph ao Studio, onde poderás testar subgraphs com o playground. Isto logo permitirá que edite no Graph Network. Para mais informações sobre preparar a CLI, [clique aqui](/developing/defining-a-subgraph#install-the-graph-cli) (e não se esqueça de ter a sua chave de lançamento em mãos!). Lembre-se, **lançar não é a mesma coisa que editar.** Ao lançar um subgraph, é só enviá-lo ao Studio, onde podes testá-lo. Quando um subgraph é editado, ele é publicado on-chain. - -## Como Testar o Seu Subgraph no Subgraph Studio - -Se gostaria de testar o seu subgraph antes de editá-lo na rede, podes fazer isto no Subgraph **Playground** ou conferir os seus logs. Os logs do Subgraph dirão **onde** o seu subgraph tem problemas, caso ele falhe. - -## Como Editar o Seu Subgraph no Subgraph Studio - -Parabéns por ter chegado tão longe! - -Para poder editar o seu subgraph com êxito, siga os passos detalhados [neste blog](/publishing/publishing-a-subgraph/). - -Confira o vídeo abaixo também: - - - -Lembre-se, enquanto passas pelo seu fluxo de publicação, poderá enviar pushes para o Arbitrum One ou o Arbitrum Sepolia. Se for um programador noviço de subgraph, recomendamos que comece a editar no Sepolia, que é gratuito. 
Isto permite-lhe ver como o subgraph funcionará no The Graph Explorer e testar elementos de curadoria. - -Os indexadores devem emitir relatos obrigatórios de Prova de Indexação a partir de um hash de bloco específico. Como editar um subgraph é feito on-chain, lembre-se que a transação pode demorar alguns minutos para ser aceita. Seja qual for o endereço que usar para publicar o contrato, será o único com a capacidade de publicar versões futuras. Escolha com cuidado! - -Subgraphs com sinal de curadoria são exibidos aos Indexadores para que eles possam ser indexados na rede descentralizada. Podes publicar subgraphs e sinalizar em uma transação, o que permite-lhe mintar (cunhar) o primeiro sinal de curadoria no subgraph e ainda poupa custos de gas. Ao adicionar o seu sinal ao sinal mais tarde fornecido pelos Curadores, seu subgraph terá mais chances de servir queries finalmente. - -**Agora que o seu subgraph está editado, vamos ver como você o dirigirá numa base regular.** Note que não podes publicar o seu subgraph na rede se a sincronização dele falhar. Isto costuma acontecer por erros no subgraph — os logs dirão-lhe onde eles estão! - -## Fazendo Versões do Seu Subgraph com a CLI - -Os programadores podem querer atualizar o seu subgraph por uma variedade de razões. Neste caso, dá para lançar uma nova versão do seu subgraph ao Studio com a CLI (só será privado neste ponto), e se ficar feliz com ela, esta poderá ser publicada no The Graph Explorer. Isto criará uma nova versão do seu subgraph que os curadores podem começar a sinalizar, e os Indexadores poderão indexar esta nova versão. - -Até recentemente, para atualizar os metadados dos seus subgraphs, os programadores eram obrigados a lançar e editar uma nova versão do seu subgraph ao Explorer. Agora, podem atualizar os metadados dos seus subgraphs **sem precisar publicar uma versão nova**. Os desenvolvedores podem atualizar seus detalhes de subgraph no Studio (sob a imagem de perfil, nome, descrição, etc.) 
com uma opção chamada **Update Details (Atualizar Detalhes)** no Graph Explorer. Se conferida, será gerada uma transação on-chain que atualiza os detalhes do subgraph no Explorer sem precisar editar uma versão nova com um lançamento novo. - -Por favor, lembra-se que há custos associados com a edição da nova versão de um subgraph à rede. Além das taxas de transação, os programadores também devem bancar uma parte da taxa de curadoria no sinal de automigração. Se os curadores não sinalizarem na nova versão do seu subgraph, não dá para editá-la. Para mais informações sobre os riscos da curadoria, leia mais [aqui](/network/curating). - -### Arquivamento Automático de Versões de Subgraphs - -Quando lançar uma nova versão de subgraph no Subgraph Studio, a versão anterior será arquivada. Versões arquivadas não serão indexadas ou sincronizadas; assim, não são sujeitas a queries. Versões arquivadas do seu subgraph podem ser resgatadas na UI do Studio; as versões anteriores de subgraphs não editados lançados ao Studio serão arquivadas automaticamente. - -![Subgraph Studio — Tirar do Arquivo](/img/Unarchive.png) diff --git a/website/pages/pt/developing/creating-a-subgraph.mdx b/website/pages/pt/developing/creating-a-subgraph.mdx deleted file mode 100644 index 005b9be44b01..000000000000 --- a/website/pages/pt/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Como criar um Subgraph ---- - -Um subgraph extrai dados de uma blockchain, os processa e os armazena para poderem ser consultados facilmente via GraphQL. 
- -![Como definir um Subgraph](/img/defining-a-subgraph.png) - -A definição de subgraph consiste de alguns arquivos: - -- `subgraph.yaml`: um arquivo YAML que contém o manifest do subgraph - -- `schema.graphql`: um schema GraphQL que define quais dados são armazenados para o seu subgraph, e como consultá-los em query via GraphQL - -- `AssemblyScript Mappings`: código em [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) que traduz dos dados de eventos às entidades definidas no seu schema (por ex., `mapping.ts` neste tutorial) - -> Para utilizar o seu subgraph na rede descentralizada do The Graph, será necessário [criar uma chave API](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). É recomendado [adicionar um sinal](/network/curating/#how-to-signal) ao seu subgraph com, no mínimo, [3000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Antes de se aprofundar nos conteúdos do arquivo manifest, instale o [Graph CLI](https://github.com/graphprotocol/graph-tooling), que será necessário para construir e adicionar um subgraph. - -## Como instalar o Graph CLI - -O Graph CLI é escrito em JavaScript, e só pode ser usado após instalar o `yarn` ou o `npm`; vamos supor que tens o yarn daqui em diante. - -Quando tiver o `yarn`, instale o Graph CLI com o seguinte - -**Instalação com o yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Instalação com o npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Instalado, o comando `graph init` pode preparar um novo projeto de subgraph, seja de um contrato existente ou de um exemplo de subgraph. Este comando serve para criar um subgraph no Subgraph Studio ao passar o `graph init --product subgraph-studio`. Se já tem um contrato inteligente lançado na mainnet do Ethereum ou uma de suas testnets, inicializar um novo subgraph daquele contrato pode ser um bom começo. 
- -## De um Contrato Existente - -O seguinte comando cria um subgraph que indexa todos os eventos de um contrato existente. Ele tenta buscar a ABI de contrato do Etherscan e resolve solicitar um local de arquivo. Se quaisquer dos argumentos opcionais estiverem a faltar, ele levará-te a um formulário interativo. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -O `` é a ID do seu subgraph no Subgraph Studio, visível na página dos detalhes do seu subgraph. - -## De um Exemplo de Subgraph - -O segundo modo que o `graph init` apoia é criar um projeto a partir de um exemplo de subgraph. O seguinte comando faz isso: - -```sh -graph init --studio -``` - -O [subgraph de exemplo](https://github.com/graphprotocol/example-subgraph) é baseado no contrato Gravity por Dani Grant, que administra avatares de usuários e emite eventos `NewGravatar` ou `UpdateGravatar` sempre que avatares são criados ou atualizados. O subgraph lida com estes eventos ao escrever entidades `Gravatar` ao armazenamento do Graph Node e garantir que estes são atualizados de acordo com os eventos. As seguintes secções lidarão com os arquivos que compõem o manifest do subgraph para este exemplo. - -## Como Adicionar Novos dataSources para um Subgraph Existente - -Desde a `v0.31.0`, o `graph-cli` apoia a adição de novos dataSources para um subgraph existente, através do comando `graph add`. - -```sh -graph add
[] - -Opções: - - --abi Caminho à ABI do contrato (padrão: baixar do Etherscan) - --contract-name Nome do contrato (padrão: Contract) - --merge-entities Se fundir ou não entidades com o mesmo nome (padrão: false) - --network-file Caminho ao arquivo de configuração das redes (padrão: "./networks.json") -``` - -O comando `add` pegará a ABI do Etherscan (a não ser que um caminho para a ABI seja especificado com a opção `--abi`), e criará um novo `dataSource` da mesma maneira que o comando `graph init` cria um `dataSource` `--from-contract`, a atualizar o schema e os mapeamentos de acordo. - -A opção `--merge-entities` identifica como o programador gostaria de lidar com nomes de conflito em `entity` e `event`: - -- Se for `true`: o novo `dataSource` deve usar `eventHandlers` & `entities` existentes. -- Se for `false`: um novo handler de entidades & eventos deve ser criado com `${dataSourceName}{EventName}`. - -O endereço (`address`) será escrito ao `networks.json` para a rede relevante. - -> **Nota:** Quando usar a cli interativa, após executar o `graph init` com êxito, receberá uma solicitação para adicionar um novo `dataSource`. - -## O Manifest do Subgraph - -O manifest do subgraph `subgraph.yaml` define os contratos inteligentes indexados pelo seu subgraph; a quais eventos destes contratos prestar atenção; e como mapear dados de eventos a entidades que o Graph Node armazena e permite queries. Veja a especificação completa para manifests de subgraph [aqui](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md).
 - -Para o subgraph de exemplo, o `subgraph.yaml` é: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -As entradas importantes para atualizar para o manifest são: - -- `specVersion`: uma versão de semver que identifica a estrutura de manifest apoiada e a sua funcionalidade para o subgraph. A versão mais recente é `1.2.0`. Veja a secção de [lançamentos do specVersion](#specversion-releases) para mais detalhes sobre recursos & lançamentos. - -- `description`: uma descrição legível a humanos do que é o subgraph. Esta descrição é exibida pelo Graph Explorer quando o subgraph é lançado ao Subgraph Studio. - -- `repository`: a URL do repositório onde está o manifest do subgraph. Isto também é exibido no Graph Explorer. - -- `features`: uma lista de todos os nomes de [feature](#experimental-features) usados. - -- `indexerHints.prune`: Define a retenção de dados históricos de blocos para um subgraph. Veja [prune](#prune) na secção [indexerHints](#indexer-hints).
- -- `dataSources.source`: o endereço do contrato inteligente que abastece o subgraph, e a ABI do contrato inteligente a ser usada. O endereço é opcional; omiti-lo permite indexar eventos correspondentes de todos os contratos. - -- `dataSources.source.startBlock`: o número opcional do bloco de onde a fonte de dados começa a indexar. Em muitos casos, sugerimos usar o bloco em que o contrato foi criado. - -- `dataSources.source.endBlock`: O número opcional do bloco onde a fonte de dados pára de indexar, o que inclui aquele bloco. Versão de spec mínima requerida: `0.0.9`. - -- `dataSources.context`: pares de key-value que podem ser usados dentro de mapeamentos de subgraph. Apoia vários tipos de dados como `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, e `BigInt`. Cada variável deve especificar o seu `type` e `data`. Estas variáveis de contexto são então acessíveis nos arquivos de mapeamento, a fim de oferecer opções mais configuráveis para o desenvolvimento de subgraphs. - -- `dataSources.mapping.entities`: as entidades que a fonte de dados escreve ao armazenamento. O schema para cada entidade é definido no arquivo schema.graphql. - -- `dataSources.mapping.abis`: um ou mais arquivos ABI nomeados para o contrato-fonte, além de quaisquer outros contratos inteligentes com os quais interage de dentro dos mapeamentos. - -- `dataSources.mapping.eventHandlers`: lista os eventos de contratos inteligentes aos quais este subgraph reage, e os handlers no mapping — ./src/mapping.ts no exemplo — que transformam estes eventos em entidades no armazenamento. - -- `dataSources.mapping.callHandlers`: lista as funções de contratos inteligentes aos quais este subgraph reage, e os handlers no mapping que transformam as entradas e saídas para chamadas de função em entidades no armazenamento. - -- `dataSources.mapping.blockHandlers`: lista os blocos aos quais este subgraph reage, e handlers no mapeamento quando um bloco é atrelado à chain. 
Sem um filtro, o handler de blocos será executado em todo bloco. Um filtro de chamada opcional pode ser fornecido ao adicionar um campo `filter` com `kind: call` no handler. Isto só executará o handler se o bloco conter no mínimo uma chamada ao contrato da fonte de dados. - -Um único subgraph pode indexar dados de vários contratos inteligentes. Adicione uma entrada para cada contrato cujos dados devem ser indexados ao arranjo `dataSources`. - -### Ordem de Handlers de Gatilhos - -Os gatilhos para uma fonte de dados dentro de um bloco são ordenados com o seguinte processo: - -1. Gatilhos de evento e chamada são, primeiro, ordenados por índice de transação no bloco. -2. Gatilhos de evento e chamada dentro da mesma transação são ordenados a usar uma convenção: primeiro, gatilhos de evento, e depois, de chamada, cada tipo a respeitar a ordem em que são definidos no manifest. -3. Gatilhos de blocos são executados após gatilhos de evento e chamada, na ordem em que são definidos no manifest. - -Estas regras de organização estão sujeitas à mudança. - -> **Nota:** Quando novas [fontes de dados dinâmicas](#data-source-templates-for-dynamically-created-contracts) forem criadas, os handlers definidos para fontes de dados dinâmicas só começarão o processamento após todos os handlers existentes forem processados, e repetirão a mesma sequência quando ativados. - -### Filtros de Argumentos Indexados / Filtros de Tópicos - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. 
- -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### Como Filtros de Tópicos Funcionam - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Declaração de evento com parâmetros indexados para endereços - event Transfer(address indexed from, address indexed to, uint256 value); - - // Função para simular a transferência de tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -Neste exemplo: - -- O evento `Transfer` é usado para gravar transações de tokens entre endereços. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- A função `transfer` é uma representação simples de uma ação de transferência de token, e emite o evento Transfer sempre que é chamada. - -#### Configuração em Subgraphs - -Filtros de tópicos são definidos diretamente na configuração de handlers de eventos no manifest do subgraph. Veja como eles são configurados: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -Neste cenário: - -- `topic1` corresponde ao primeiro argumento indexado do evento, `topic2` ao segundo e `topic3` ao terceiro. 
 -- Cada tópico pode ter um ou mais valores, e um evento só é processado se corresponder a um dos valores em cada tópico especificado. - -##### Lógica de Filtro - -- Dentro de um Tópico Único: A lógica funciona como uma condição OR. O evento será processado se corresponder a qualquer dos valores listados num tópico. -- Entre Tópicos Diferentes: A lógica funciona como uma condição AND. Um evento deve atender a todas as condições especificadas em vários tópicos para acionar o handler associado. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -Nesta configuração: - -- `topic1` é configurado para filtrar eventos `Transfer` onde `0xAddressA` é o remetente. -- `topic2` é configurado para filtrar eventos `Transfer` onde `0xAddressB` é o destinatário. -- O subgraph só indexará transações que ocorrerem diretamente do `0xAddressA` ao `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -Nesta configuração: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## eth_call declarada - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`.
Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. 
- -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Exemplo de Configuração no Manifest do Subgraph - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### Versões do SpecVersion - -| Versão | Notas de atualização | -| :-: | --- | -| 1.2.0 | Adicionado apoio a [Filtragem de Argumentos Indexados](/#indexed-argument-filters--topic-filters) & `eth_call` declarado | -| 1.1.0 | Apoio a [Séries de Tempo & Agregações](#timeseries-and-aggregations). Apoio adicionado ao tipo `Int8` para `id`. 
| -| 1.0.0 | Apoia o recurso [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) para fazer pruning de subgraphs | -| 0.0.9 | Apoio ao recurso `endBlock` | -| 0.0.8 | Adicionado apoio ao polling de [Handlers de Bloco](/developing/creating-a-subgraph/#polling-filter) e [Handlers de Inicialização](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Adicionado apoio a [Fontes de Arquivos de Dados](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Apoio à variante de calculação de [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi). | -| 0.0.5 | Adicionado apoio a handlers de eventos com acesso a recibos de transação. | -| 0.0.4 | Adicionado apoio à gestão de recursos de subgraph. | - -### Como Obter as ABIs - -Os arquivos da ABI devem combinar com o(s) seu(s) contrato(s). Há algumas maneiras de obter estes arquivos: - -- Caso construa o seu próprio projeto, provavelmente terá acesso às suas ABIs mais recentes. -- Caso construa um subgraph para um projeto público, pode baixar aquele projeto no seu computador e construir a ABI ao usar o comando [`truffle compile`](https://truffleframework.com/docs/truffle/overview) ou compilar com solc. -- Também pode achar a ABI no [Etherscan](https://etherscan.io/), mas isto nem sempre é confiável, pois a ABI exibida lá pode estar ultrapassada. Tenha certeza que tem a ABI certa, senão, pode haver um erro ao executar o seu subgraph. - -## O Schema GraphQL - -O schema para o seu subgraph está no arquivo `schema.graphql`. Schemas de GraphQL são definidos usando a linguagem de definição GraphQL. Se você nunca tiver escrito um schema nesta linguagem, confira este preparatório no sistema de tipos do GraphQL. A documentação de referência para schemas em GraphQL está na seção [API GraphQL](/querying/graphql-api). - -## Como Definir Entidades - -Antes de definir entidades, é importante parar para pensar sobre como os seus dados são estruturados e ligados.
Todas as consultas serão feitas perante o modelo de dados definido no schema do subgraph e as entidades indexadas pelo subgraph. Portanto, é bom definir o schema do subgraph de uma forma que atenda as necessidades do seu dApp. Pode ser conveniente imaginar entidades como "objetos a conter dados", ao invés de eventos ou funções. - -Com o The Graph, pode simplesmente definir tipos de entidade no `schema.graphql`; assim, o Graph Node gerará campos de nível alto para fazer queries de instâncias únicas e coleções daquele tipo de entidade. Cada tipo que deve ser uma entidade tem de ser anotado com uma diretiva `@entity`. As entidades são mutáveis, o que significa que os mapeamentos podem carregar entidades existentes, modificá-las e armazenar novas versões delas. A mutabilidade vem com um preço; por exemplo, para tipos de entidade que claramente não podem ser alterados por conter dados extraídos exatamente da chain, é recomendado marcá-los como imutáveis com `@entity(immutable: true)`. Entidades imutáveis podem ser alteradas com mapeamentos, desde que as alterações aconteçam no mesmo bloco em que a entidade foi criada. Entidades imutáveis são muito mais rápidas de escrever e consultar, e então devem ser usadas sempre que possível. - -### Bom Exemplo - -A entidade `Gravatar` embaixo é estruturada em torno de um objeto Gravatar, e é um bom exemplo de como pode ser definida uma entidade. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Mau Exemplo - -As entidades `GravatarAccepted` e `GravatarDeclined` abaixo têm base em torno de eventos. Não é recomendado mapear eventos ou chamadas de função a entidades identicamente. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Campos Opcionais e Obrigatórios - -Campos de entidade podem ser definidos como obrigatórios ou opcionais. Os obrigatórios são indicados no schema pelo código `!`. Se um campo obrigatório não for determinado no mapeamento, receberá este erro ao consultar o campo: - -``` -Null value resolved for non-null field 'name' -``` - -Cada entidade deve ter um campo `id`, que deve ser do tipo `Bytes!` ou `String!`. Geralmente é melhor usar `Bytes!` — a não ser que o `id` tenha texto legível para humanos, já que entidades com as ids `Bytes!` são mais fáceis de escrever e consultar como aquelas com um `id` `String!`. O campo `id` serve como a chave primária, e deve ser singular entre todas as entidades do mesmo tipo. Por razões históricas, o tipo `ID!` também é aceito, como um sinônimo de `String!`. - -Para alguns tipos de entidade, o `id` é construído das id's de duas outras entidades; isto é possível com o `concat`, por ex., `let id = left.id.concat(right.id)` para formar a id a partir das id's de `left` e `right`. Da mesma forma, para construir uma id a partir da id de uma entidade existente e um contador `count`, pode ser usado o `let id = left.id.concatI32(count)`. Isto garante a concatenação a produzir id's únicas enquanto o comprimento do `left` for o mesmo para todas as tais entidades; por exemplo, porque o `left.id` é um `Address` (endereço). - -### Tipos Embutidos de Escalar - -#### Escalares Apoiados pelo GraphQL - -Nós apoiamos os seguintes escalares na nossa API do GraphQL: - -| Tipo | Descrição | -| --- | --- | -| `Bytes` | Arranjo de bytes, representado como string hexadecimal. Usado frequentemente por hashes e endereços no Ethereum. | -| `String` | Escalar para valores `string`. Caracteres nulos são removidos automaticamente. | -| `Boolean` | Escalar para valores `boolean`. | -| `Int` | A especificação do GraphQL define o `Int` como um inteiro assinado de 32 bits. 
| -| `Int8` | Um número inteiro assinado de 8 bytes, também conhecido como um número inteiro assinado em 64 bits, pode armazenar valores de -9,223,372,036,854,775,808 a 9,223,372,036,854,775,807. Prefira usar isto para representar o `i64` do ethereum. | -| `BigInt` | Números inteiros grandes. Usados para os tipos `uint32`, `int64`, `uint64`, ..., `uint256` do Ethereum. Nota: Tudo abaixo de `uint32`, como `int32`, `uint24` ou `int8` é representado como `i32`. | -| `BigDecimal` | Decimais de alta precisão representados como um significando e um exponente. O alcance de exponentes é de -6143 até +6144. Arredondado para 34 dígitos significantes. | -| `Timestamp` | É um valor `i64` em microssegundos. Usado frequentemente para campos `timestamp` para séries de tempo e agregações. | - -#### Enums - -Também pode criar enums dentro de um schema. Enums têm a seguinte sintaxe: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Quando o enum for definido no schema, pode usar a representação do string do valor enum para determinar um campo enum numa entidade. Por exemplo, pode implantar o `tokenStatus` no `SecondOwner` ao definir primeiro a sua entidade e depois determinar o campo com `entity.tokenStatus = "SecondOwner"`. O exemplo abaixo demonstra como ficaria a entidade do Token com um campo enum: - -Veja mais detalhes sobre a escrita de enums na [documentação do GraphQL](https://graphql.org/learn/schema/). - -#### Relacionamentos de Entidades - -Uma entidade pode ter relacionamentos com uma ou mais entidades no seu schema; estes podem ser tratados nas suas consultas. Os relacionamentos no The Graph são unidirecionais, e é possível simular relacionamentos bidirecionais ao definir um relacionamento unidirecional em cada "lado" do relacionamento projetado. - -Relacionamentos são definidos em entidades como qualquer outro campo, sendo que o tipo especificado é o de outra entidade.
- -#### Relacionamentos Um-com-Um - -Defina um tipo de entidade `Transaction` com um relacionamento um-com-um opcional, com um tipo de entidade `TransactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Relacionamentos Um-com-Vários - -Defina um tipo de entidade `TokenBalance` com um relacionamento um-com-vários, exigido com um tipo de entidade Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Buscas Reversas - -Buscas reversas podem ser definidas em uma entidade pelo campo `@derivedFrom`. Isto cria um campo virtual na entidade, que pode ser consultado, mas não pode ser configurado manualmente pela API de mapeamentos. Em vez disto, ele é derivado do relacionamento definido na outra entidade. Para tais relacionamentos, raramente faz sentido armazenar ambos os lados do relacionamento, e tanto o indexing quanto o desempenho dos queries melhorarão quando apenas um lado for armazenado, e o outro derivado. - -Para relacionamentos um-com-vários, o relacionamento sempre deve ser armazenado no lado 'um', e o lado 'vários' deve sempre ser derivado. Armazenar o relacionamento desta maneira, em vez de armazenar um arranjo de entidades no lado 'vários', melhorará dramaticamente o desempenho para o indexing e os queries no subgraph. Em geral, evite armazenar arranjos de entidades enquanto for prático. - -#### Exemplo - -Podemos fazer os saldos para um token acessíveis a partir do mesmo token ao derivar um campo `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Relacionamentos Vários-com-Vários - -Para relacionamentos vários-com-vários, como um conjunto de utilizadores em que cada um pertence a qualquer número de organizações, o relacionamento é mais simplesmente — mas não mais eficientemente — modelado como um arranjo em cada uma das duas entidades envolvidas. Se o relacionamento for simétrico, apenas um lado do relacionamento precisa ser armazenado, e o outro lado pode ser derivado. - -#### Exemplo - -Defina uma busca reversa a partir de um tipo de entidade `User` para um tipo de entidade `Organization`. No exemplo abaixo, isto é feito ao buscar pelo atributo `members` a partir de dentro da entidade `Organization`. Em queries, o campo `organizations` no `User` será resolvido ao encontrar todas as entidades `Organization` que incluem a ID do utilizador. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -Uma maneira mais eficiente para armazenar este relacionamento é com uma mesa de mapeamento que tem uma entrada para cada par de `User` / `Organization`, com um schema como - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -Esta abordagem requer que os queries desçam a um nível adicional para retirar, por exemplo, as organizações para utilizadores: - -```graphql -query usersWithOrganizations { - users { - organizations { - # isto é uma entidade UserOrganization - organization { - name - } - } - } -} -``` - -Esta maneira mais elaborada de armazenar relacionamentos vários-com-vários armazenará menos dados para o subgraph, portanto, o subgraph ficará muito mais rápido de indexar e consultar. - -#### Como adicionar comentários ao schema - -Pela especificação do GraphQL, é possível adicionar comentários acima de atributos de entidade do schema com o símbolo de hash `#`. Isto é ilustrado no exemplo abaixo: - -```graphql -type MyFirstEntity @entity { - # identificador único e chave primária da entidade - id: Bytes! - address: Bytes! -} -``` - -## Como Definir Campos de Busca Fulltext - -Buscas fulltext filtram e ordenam entidades baseadas num texto inserido. Queries fulltext podem retornar resultados para palavras semelhantes ao processar o texto inserido antes de compará-los aos dados do texto indexado. - -Uma definição de query fulltext inclui: o nome do query, o dicionário do idioma usado para processar os campos de texto, o algoritmo de ordem usado para ordenar os resultados, e os campos incluídos na busca. Todo query fulltext pode ter vários campos, mas todos os campos incluídos devem ser de um único tipo de entidade. - -Para adicionar um query fulltext, inclua um tipo `_Schema_` com uma diretiva fulltext no schema em GraphQL. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -O exemplo `bandSearch` serve, em queries, para filtrar entidades `Band` baseadas nos documentos de texto nos campos `name`, `description` e `bio`. Confira a página [API GraphQL - Consultas](/querying/graphql-api#queries) para uma descrição da API de busca fulltext e mais exemplos de uso. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Gestão de Características](#experimental-features):** A partir do `specVersion` `0.0.4` em diante, o `fullTextSearch` deve ser declarado sob a seção `features` no manifest do subgraph. - -### Idiomas apoiados - -Escolher um idioma diferente terá um efeito definitivo, porém às vezes sutil, na API da busca fulltext. Campos cobertos por um campo de query fulltext serão examinados no contexto do idioma escolhido, para que os lexemas produzidos pela análise e pelos queries de busca variem de idioma para idioma. Por exemplo: ao usar o dicionário turco, "token" é abreviado para "toke" enquanto, claro, o dicionário em inglês o categorizará como "token". - -Dicionários apoiados: - -| Código | Dicionário | -| ------ | ----------- | -| simple | Geral | -| da | Dinamarquês | -| nl | Holandês | -| en | Inglês | -| fi | Finlandês | -| fr | Francês | -| de | Alemão | -| hu | Húngaro | -| it | Italiano | -| no | Norueguês | -| pt | Português | -| ro | Romeno | -| ru | Russo | -| es | Espanhol | -| sv | Sueco | -| tr | Turco | - -### Algoritmos de Ordem - -Algoritmos apoiados para a organização de resultados: - -| Algoritmo | Descrição | -| ------------- | --------------------------------------------------------------------------------- | -| rank | Organiza os resultados pela qualidade da correspondência (0-1) da busca fulltext. | -| proximityRank | Parecido com o rank, mas também inclui a proximidade das correspondências. 
| - -## Como Escrever Mapeamentos - -Os mapeamentos tomam dados de uma fonte particular e os transformam em entidades que são definidas dentro do seu schema. São escritos em um subconjunto do [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) chamado [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki), que pode ser compilado ao WASM ([WebAssembly](https://webassembly.org/)). O AssemblyScript é mais rígido que o TypeScript normal, mas rende uma sintaxe familiar. - -Para cada handler de evento definido no `subgraph.yaml` sob o `mapping.eventHandlers`, crie uma função exportada de mesmo nome. Cada handler deve aceitar um único parâmetro chamado `event` com um tipo a corresponder ao nome do evento sendo lidado. - -No subgraph de exemplo, o `src/mapping.ts` contém handlers para os eventos `NewGravatar` e `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -O primeiro handler toma um evento `NewGravatar` e cria uma nova entidade `Gravatar` com o `new Gravatar(event.params.id.toHex())`, e assim popula os campos da entidade com os parâmetros de evento correspondentes. Esta instância da entidade é representada pelo variável `gravatar`, com um valor de id de `event.params.id.toHex()`. 
- -O segundo handler tenta carregar o `Gravatar` do armazenamento do Graph Node. Se ele ainda não existe, ele é criado por demanda. A entidade é então atualizada para corresponder aos novos parâmetros de evento, antes de ser devolvida ao armazenamento com `gravatar.save()`. - -### IDs Recomendadas para Criar Novas Entidades - -Recomendamos muito utilizar `Bytes` como o tipo para campos `id`, e só usar o `String` para atributos que realmente contenham texto legível para humanos, como o nome de um token. Abaixo estão alguns valores recomendados de `id` para considerar ao criar novas entidades. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- Para entidades que armazenam dados agregados como, por exemplo, volumes diários de trading, a `id` costuma conter o número do dia. Aqui, usar `Bytes` como a `id` é beneficial. Determinar a `id` pareceria com - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Converta endereços constantes em `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -Há uma [Biblioteca do Graph Typescript](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts), com utilidades para interagir com o armazenamento do Graph Node e conveniências para lidar com entidades e dados de contratos inteligentes. Ela pode ser importada ao `mapping.ts` do `@graphprotocol/graph-ts`. - -### Gestão de entidades com IDs idênticas - -Ao criar e salvar uma nova entidade, se já houver uma com a mesma ID, vale sempre usar as propriedades da nova entidade durante o processo de fusão. Isto significa que a entidade existente será atualizada com os valores da entidade nova. - -Se um valor nulo for propositadamente determinado para um campo na nova entidade com a mesma ID, a entidade existente será atualizada com o valor nulo. 
- -Se nenhum valor for inserido para um campo na nova entidade com a mesma ID, o campo também resultará em nulo. - -## Geração de Código - -Para tornar mais fácil e seguro a tipos o trabalho com contratos inteligentes, eventos e entidades, o Graph CLI pode gerar tipos de AssemblyScript a partir do schema GraphQL do subgraph e das ABIs de contratos incluídas nas fontes de dados. - -Isto é feito com - -```sh -graph codegen [--output-dir ] [] -``` - -mas geralmente, os subgraphs já são pré-configurados através do `package.json` para alcançar o mesmo com a execução de um dos seguintes: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Isto gerará uma classe de AssemblyScript para todo contrato inteligente nos arquivos ABI mencionados no `subgraph.yaml`, permitindo ligar estes contratos a endereços específicos nos mapeamentos e chamar métodos de contratos de apenas-leitura contra o bloco a ser processado. Também gerará uma classe para todo evento de contrato para fornecer acesso fácil a parâmetros de eventos, assim como ao bloco e a transação dos quais o evento originou. Todos estes tipos são escritos para `//.ts`. No subgraph de exemplo, isto seria o `generated/Gravity/Gravity.ts`, permitindo que estes tipos sejam importados com mapeamentos. - -```javascript -import { - // Classe do contrato: - Gravity, - // Classes de eventos: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Além disto, uma classe é gerada para cada tipo de entidade no schema GraphQL do subgraph. Estas classes rendem carregamento, acesso de leitura e escritura para campos de entidades com segurança de tipos, além de um método `save()` para escrever entidades ao armazenamento. 
Todas as classes de entidades são escritas no `/schema.ts`, permitindo que os mapeamentos as importem com - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Nota:** A geração de códigos deve ser executada novamente após todas as mudanças ao schema GraphQL ou às ABIs incluídas no manifest. Ela também deve ser executada pelo menos uma vez antes de construir ou lançar o subgraph. - -A geração de código não confere o seu código de mapeamento no `src/mapping.ts`. Se quiser conferir isto antes de tentar lançar o seu subgraph ao Graph Explorer, pode executar o `yarn build` e consertar quaisquer erros de sintaxe que o compilador TypeScript possa encontrar. - -## Modelos de Fontes de Dados - -Um padrão comum em contratos inteligentes compatíveis com EVMs é o uso de contratos de registro ou fábrica. Nisto, um contrato cria, gesta ou refere a um número arbitrário de outros contratos, cada um com o seu próprio estado e eventos. - -Os endereços destes subcontratos podem ou não ser conhecidos imediatamente, e muitos destes contratos podem ser criados e/ou adicionados ao longo do tempo. É por isto que, em muitos casos, é impossível definir uma única fonte de dados ou um número fixo de fontes de dados, e é necessária uma abordagem mais dinâmica: _modelos de fontes de dados_. - -### Fonte de Dados para o Contrato Principal - -Primeiro, defina uma fonte de dados regular para o contrato principal. Abaixo está um exemplo simplificado de fonte de dados para o contrato de fábrica de trocas do [Uniswap](https://uniswap.org). Preste atenção ao handler de evento `NewExchange(address,address)`: é emitido quando um novo contrato de troca é criado on-chain pelo contrato de fábrica. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Modelos de Fontes de Dados para Contratos Criados Dinamicamente - -Depois, adicione _modelos de fontes de dados_ ao manifest. Estes são idênticos a fontes de dados regulares, mas não têm um endereço de contrato predefinido sob `source`. Tipicamente, definiria um modelo para cada tipo de subcontrato gestado ou referenciado pelo contrato parente. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... outros campos de fonte para o contrato principal ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Como Instanciar um Modelo de Fontes de Dados - -No passo final, atualize o seu mapeamento de contratos para criar uma instância dinâmica de fontes de dados de um dos modelos. Neste exemplo, mudarias o mapeamento do contrato principal para importar o modelo `Exchange` e chamar o método `Exchange.create(address)` nele, para começar a indexar o novo contrato de troca. 
- -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Comece a indexar a troca; `event.params.exchange` é o - // endereço do novo contrato de troca - Exchange.create(event.params.exchange) -} -``` - -> **Nota:** Uma nova fonte de dados só processará as chamadas e eventos para o bloco onde ele foi criado e todos os blocos a seguir. Porém, não serão processados dados históricos, por ex, contidos em blocos anteriores. -> -> Se blocos anteriores conterem dados relevantes à nova fonte, é melhor indexá-los ao ler o estado atual do contrato e criar entidades que representem aquele estado na hora que a nova fonte de dados for criada. - -### Contextos de Fontes de Dados - -Contextos de fontes de dados permitem passar configurações extras ao instanciar um template. Em nosso exemplo, vamos dizer que trocas são associadas com um par de trading particular, que é incluído no evento `NewExchange`. Essa informação pode ser passada na fonte de dados instanciada, como: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Dentro de um mapeamento do modelo `Exchange`, dá para acessar o contexto: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Há setters e getters como `setString` e `getString` para todos os tipos de valores. - -## Blocos Iniciais - -O `startBlock` é uma configuração opcional que permite-lhe definir de qual bloco na chain a fonte de dados começará a indexar. Determinar o bloco inicial permite que a fonte de dados potencialmente pule milhões de blocos irrelevantes. 
Tipicamente, um programador de subgraph configurará o `startBlock` ao bloco em que o contrato inteligente da fonte de dados foi criado. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Nota:** O bloco da criação do contrato pode ser buscado rapidamente no Etherscan: -> -> 1. Procure pelo contrato ao inserir o seu endereço na barra de busca. -> 2. Clique no hash da transação da criação na seção `Contract Creator`. -> 3. Carregue a página dos detalhes da transação, onde encontrará o bloco inicial para aquele contrato. - -## IndexerHints - -A configuração `indexerHints`, no manifest de um subgraph, providencia diretivas para Indexadores processarem e gestarem um subgraph. Ela influencia decisões operacionais entre gestão de dados, estratégias de indexação e otimizações. Atualmente ela tem a opção `prune` para lidar com a retenção ou o pruning de dados históricos. - -> Este recurso está disponível desde a `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Define a retenção de dados históricos de bloco para um subgraph. As opções incluem: - -1. `"never"`: Nenhum pruning de dados históricos; retém o histórico completo. -2. `"auto"`: Retém o histórico mínimo necessário determinado pelo Indexador e otimiza o desempenho das queries. -3. Um número específico: Determina um limite personalizado no número de blocos históricos a guardar. 
- -``` - indexerHints: - prune: auto -``` - -> O termo "histórico", neste contexto de subgraphs, refere-se ao armazenamento de dados que refletem os estados antigos de entidades mutáveis. - -O histórico, desde um bloco especificado, é necessário para: - -- [Queries de viagem no tempo](/querying/graphql-api/#time-travel-queries), que permitem queries dos estados anteriores destas entidades em blocos específicos, através do histórico do subgraph -- O uso do subgraph como uma [base de enxerto](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) em outro subgraph naquele bloco -- Rebobinar o subgraph de volta àquele bloco - -Se os dados históricos desde aquele bloco tiverem passado por pruning, as capacidades acima não estarão disponíveis. - -> Vale usar o `"auto"`, por maximizar o desempenho de queries e ser suficiente para a maioria dos utilizadores que não requerem acesso a dados extensos no histórico. - -Para subgraphs que usam [queries de viagem no tempo](/querying/graphql-api/#time-travel-queries), recomendamos configurar um número especifico de blocos para reter dados históricos ou usar o `prune: never` para manter todos os estados históricos da entidade. 
Seguem abaixo exemplos de como configurar ambas as opções nas configurações do seu subgraph: - -Para reter uma quantidade específica de dados históricos: - -``` - indexerHints: - prune: 1000 # Substitua 1000 pelo número desejado de blocos a reter -``` - -Para preservar o histórico completo dos estados da entidade: - -``` -indexerHints: - prune: never -``` - -É possível verificar o bloco mais antigo (com estado histórico) para um subgraph ao fazer um query da [API de Estado de Indexação](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note que o `earliestBlock` é o bloco mais antigo com dados históricos, que será mais recente que o `startBlock` (bloco inicial) especificado no manifest, se o subgraph tiver passado por pruning. - -## Handlers de Eventos - -Handlers de eventos em um subgraph reagem a eventos específicos emitidos por contratos inteligentes na blockchain e acionam handlers definidos no manifest do subgraph. Isto permite que subgraphs processem e armazenem dados conforme a lógica definida. - -### Como Definir um Handler de Evento - -Um handler de evento é declarado dentro de uma fonte de dados na configuração YAML do subgraph. Ele especifica quais eventos devem ser escutados e a função correspondente a ser executada quando estes eventos forem detetados. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Filtro de tópico opcional que só filtra eventos com o tópico especificado. -``` - -## Handlers de chamada - -Enquanto os eventos provém uma forma eficiente de coletar mudanças relevantes ao estado de um contrato, muitos contratos evitam gerar logs para otimizar os custos de gas. Nestes casos, um subgraph pode se inscrever em chamadas feitas ao contrato da fonte de dados. Isto é alcançado ao definir handlers de calls que referenciam a assinatura da função, e o handler de mapeamento que processará chamadas para esta função. Para processar estas chamadas, o handler de mapeamento receberá um `ethereum.Call` como um argumento com as entradas digitadas e as saídas da chamada. Chamadas feitas a qualquer profundidade na cadeia de chamadas de uma transação irão engatilhar o mapeamento; assim, atividades com o contrato de fontes de dados serão capturados através de contratos de proxy. - -Handlers de chamadas só serão ativados em um de dois casos: quando a função especificada é chamada por uma conta que não for do próprio contrato, ou quando ela é marcada como externa no Solidity e chamada como parte de outra função no mesmo contrato. - -> **Nota:** Os handlers de chamada atualmente dependem da API de rastreamento do Parity. Certas redes, como Arbitrum e BNB Chain, não apoiam esta API. 
Se um subgraph que indexa uma destas redes conter um ou mais handlers de chamadas, ele não começará a sincronização. Os programadores de subgraph devem, em vez disto, usar handlers de eventos. Estes têm desempenho bem melhor que handlers de chamadas, e são apoiados em toda rede EVM. - -### Como Definir um Handler de Chamada - -Para definir um handler de chamada no seu manifest, apenas adicione um arranjo `callHandlers` sob a fonte de dados para a qual quer se inscrever. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -O `functionm` é a assinatura de função normalizada para filtrar chamadas. A propriedade `handler` é o nome da função no seu mapeamento que gostaria de executar quando a função-alvo é chamada no contrato da fonte de dados. - -### Função de Mapeamento - -Cada handler de chamadas toma um único parâmetro, que tem um tipo correspondente ao nome da função chamada. 
No exemplo de subgraph acima, o mapeamento contém um handler para quando a função `createGravatar` é chamada e recebe um `CreateGravatarCall` como argumento: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -A função `handleCreateGravatar` toma um novo `CreateGravatarCall` que é uma subclasse do `ethereum.Call`, fornecido pelo `@graphprotocol/graph-ts`, que inclui as entradas e saídas digitadas da chamada. O tipo `CreateGravatarCall` é gerado para ti quando executa o `graph codegen`. - -## Handlers de Blocos - -Além de se inscrever a eventos de contratos ou chamadas para funções, um subgraph também pode querer atualizar os seus dados enquanto novos blocos são afixados à chain. Para isto, um subgraph pode executar uma função após cada bloco, ou após blocos que correspondem a um filtro predefinido. - -### Filtros Apoiados - -#### Filtro Call - -```yaml -filter: - kind: call -``` - -_O handler definido será chamado uma vez para cada bloco, que contém uma chamada ao contrato (fonte de dados) sob o qual o handler está definido._ - -> **Nota:** O filtro `call` atualmente depende da API de rastreamento do Parity. Certas redes, como Arbitrum e BNB Chain, não apoiam esta API. Se um subgraph que indexa uma destas redes conter um ou mais handlers de blocos com um filtro `call`, ele não começará a sincronização. - -A ausência de um filtro para um handler de blocos garantirá que o handler seja chamado a todos os blocos. Uma fonte de dados só pode conter um handler de bloco para cada tipo de filtro. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Filtro Polling - -> **Requer `specVersion` >= 0.0.8** - -> **Nota:** Filtros de polling só estão disponíveis nas dataSources `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -O handler definido será chamado uma vez a cada `n` blocos, onde `n` é o valor providenciado no campo `every`. Esta configuração permite que o subgraph faça operações específicas em intervalos de blocos regulares. - -#### Filtro Once - -> **Requer `specVersion` >= 0.0.8** - -> **Nota:** Filtros de once só estão disponíveis nas dataSources `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -O handler definido com o filtro once só será chamado uma única vez antes da execução de todos os outros handlers (por isto, o nome "once" / "uma vez"). Esta configuração permite que o subgraph use o handler como um handler de inicialização, para realizar tarefas específicas no começo da indexação. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Função de Mapeamento - -A função de mapeamento receberá um `ethereum.block` como o seu único argumento. Assim como funções de mapeamento para eventos, esta função pode acessar entidades existentes no armazenamento do subgraph, chamar contratos inteligentes e criar ou atualizar entidades. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Eventos Anónimos - -Caso precise processar eventos anónimos no Solidity, isto é possível ao fornecer o topic 0 do evento, como no seguinte exemplo: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Um evento só será ativado quando a assinatura e o topic 0 corresponderem. `topic0` é igual ao hash da assinatura do evento. - -## Recibos de Transação em Handlers de Eventos - -A partir do `specVersion` `0.0.5` e `apiVersion` `0.0.7`, handlers de eventos podem ter acesso ao recibo para a transação que os emitiu. - -Para fazer isto, os handlers de eventos devem ser declarados no manifest do subgraph com a nova chave `receipt: true`, sendo esta opcional e configurada normalmente para `false`. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Dentro da função do handler, o recibo pode ser acessado no campo `Event.receipt`. Quando a chave `receipt` é configurada em `false`, ou omitida no manifest, um valor `null` será retornado em vez disto. 
- -## Recursos experimentais - -A partir do `specVersion` `0.0.4`, os recursos de subgraph devem ser explicitamente declarados na seção `features` no maior nível do arquivo de manifest com o seu nome em `camelCase`, como listado abaixo: - -| Recurso | Nome | -| -------------------------------------------------- | ---------------- | -| [Erros não-fatais](#non-fatal-errors) | `nonFatalErrors` | -| [Busca fulltext](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Enxertos](#grafting-onto-existing-subgraphs) | `grafting` | - -Por exemplo, se um subgraph usa os recursos de **Busca Fulltext** e **Erros não-fatais**, o campo `features` no manifest deve ser: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note que usar uma ferramenta sem declará-la causará um **erro de validação** durante o lançamento de um subgraph, mas não ocorrerá nenhum erro se um recurso for declarado sem ser usado. - -### Séries de Tempo e Agregações - -Séries de tempo e agregações permitem que o seu subgraph registre estatísticas como médias diárias de preço, total de transferências por hora, etc. - -Este recurso introduz dois novos tipos de entidade de subgraph. Entidades de série de tempo registram pontos de dados com marcações de tempo. Entidades de agregação realizam cálculos pré-declarados nos pontos de dados de Séries de Tempo numa base por hora ou diária, e depois armazenam os resultados para acesso fácil via GraphQL. - -#### Exemplo de Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Definição de Série de Tempo e Agregações - -Entidades de série de tempo são definidas com `@entity(timeseries: true)` no schema.graphql. 
Cada entidade deste tipo deve ter uma ID única do tipo int8, uma marcação do tipo Timestamp, e inclui dados que serão usados para cálculos por entidades de agregação. Estas entidades de Série de Tempo podem ser salvas em handlers de ação regular, e agem como os "dados brutos" das entidades de Agregação. - -Entidades de agregação são definidas com `@aggregation` no schema.graphql. Toda entidade deste tipo define a fonte de qual resgatará dados (que deve ser uma entidade de Série de Tempo), determina os intervalos (por ex., hora, dia) e especifica a função de agregação que usará (por ex., soma, contagem, min, max, primeiro, último). Entidades de agregação são calculadas automaticamente na base da fonte especificada ao final do intervalo requerido. - -#### Intervalos de Agregação Disponíveis - -- `hour`: configura o período de série de tempo para cada hora, em ponto. -- `day`: configura o período de série de tempo para cada dia, a começar e terminar à meia-noite. - -#### Funções de Agregação Disponíveis - -- `sum`: Total de todos os valores. -- `count`: Número de valores. -- `min`: Valor mínimo. -- `max`: Valor máximo. -- `first`: Primeiro valor no período. -- `last`: Último valor no período. - -#### Exemplo de Query de Agregações - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Nota: - -Para utilizar Séries de Tempo e Agregações, um subgraph deve ter uma versão de especificação maior que 1.1.0. Note que este recurso pode passar por mudanças significativas que podem afetar a retrocompatibilidade. - -[Leia mais](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) sobre Séries de Tempo e Agregações. - -### Erros não-fatais - -Erros de indexação em subgraphs já sincronizados, por si próprios, farão que o subgraph falhe e pare de sincronizar. 
Os subgraphs podem, de outra forma, ser configurados a continuar a sincronizar na presença de erros, ao ignorar as mudanças feitas pelo handler que provocaram o erro. Isto dá tempo aos autores de subgraphs para corrigir seus subgraphs enquanto queries continuam a ser servidos perante o bloco mais recente, porém os resultados podem ser inconsistentes devido ao bug que causou o erro. Note que alguns erros ainda são sempre fatais. Para ser não-fatais, os erros devem ser confirmados como determinísticos. - -> **Nota:** A rede do The Graph ainda não apoia erros não fatais, e os programadores não devem lançar subgraphs à rede pelo Studio por esta funcionalidade. - -Permitir erros não fatais exige a configuração da seguinte feature flag no manifest do subgraph: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -A consulta também deve concordar em consultar dados que tenham possíveis inconsistências através do argumento `subgraphError`. Também vale consultar o `_meta` para verificar se o subgraph pulou erros, como no seguinte exemplo: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Caso o subgraph encontre um erro, esse query retornará tanto os dados quanto o erro no graphql com a mensagem `"indexing_error"`. Veja neste exemplo de resposta: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Como Enxertar em Subgraphs Existentes - -> **Nota:** não é recomendado usar enxertos na primeira atualização para a Graph Network. Saiba mais [aqui](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -Quando um subgraph é lançado pela primeira vez, ele começa a indexar eventos no bloco gênese da chain correspondente (ou no `startBlock` definido com cada fonte de dados). 
Às vezes, há vantagem em reutilizar os dados de um subgraph existente e começar a indexar em um bloco muito mais distante. Este modo de indexar é chamado de _Enxerto_. O enxerto, por exemplo, serve para passar rapidamente por erros simples nos mapeamentos durante a programação, ou consertar temporariamente um subgraph existente após ele ter falhado. - -Um subgraph é enxertado em um subgraph base quando um manifest de subgraph no `subgraph.yaml` contém um bloco `graft` no maior nível: - -```yaml -description: ... -graft: - base: Qm... # ID do subgraph base - block: 7345624 # Número do bloco -``` - -Quando é lançado um subgraph cujo manifest contém um bloco `graft`, o Graph Node copiará os dados do subgraph `base` até, e inclusive, o `block` dado, e então continuará a indexar o novo subgraph a partir daquele bloco. O subgraph base deve existir na instância-alvo do Graph Node e ter indexado até, no mínimo, o bloco dado. Devido a esta restrição, o enxerto só deve ser usado durante a programação, ou em uma emergência para acelerar a produção de um subgraph não-enxertado equivalente. - -Como o enxerto copia em vez de indexar dados base, dirigir o subgraph para o bloco desejado desta maneira é mais rápido que indexar do começo, mesmo que a cópia inicial dos dados ainda possa levar várias horas para subgraphs muito grandes. Enquanto o subgraph enxertado é inicializado, o Graph Node gravará informações sobre os tipos de entidade que já foram copiados. - -O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema do subgraph base, mas é apenas compatível com ele. 
Ele deve ser um schema válido no seu próprio mérito, mas pode desviar do schema do subgraph base nas seguintes maneiras: - -- Ele adiciona ou remove tipos de entidade -- Ele retira atributos de tipos de identidade -- Ele adiciona atributos anuláveis a tipos de entidade -- Ele transforma atributos não anuláveis em atributos anuláveis -- Ele adiciona valores a enums -- Ele adiciona ou remove interfaces -- Ele muda para quais tipos de entidades uma interface é implementada - -> **[Gerenciamento de Recursos](#experimental-features):** O `grafting` deve ser declarado sob `features` no manifest do subgraph. - -## IPFS/Arweave File Data Sources - -Fontes de dados de arquivos são uma nova funcionalidade de subgraph para acessar dados off-chain de forma robusta e extensível. As fontes de dados de arquivos apoiam o retiro de arquivos do IPFS e do Arweave. - -> Isto também abre as portas para indexar dados off-chain de forma determinística, além de potencialmente introduzir dados arbitrários com fonte em HTTP. - -### Visão geral - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -Isto é parecido com os [modelos de fontes de dados existentes](/developing/creating-a-subgraph/#data-source-templates), usados para dinamicamente criar fontes de dados baseadas em chains. - -> Isto substitui a API `ipfs.cat` existente - -### Guia de atualização - -#### Atualizar `graph-ts` e `graph-cli` - -O recurso de fontes de dados de arquivos exige o graph-ts >=0.29.0 e o graph-cli >=0.33.1 - -#### Adicionar um novo tipo de entidade que será atualizado quando os arquivos forem encontrados - -Fontes de dados de arquivos não podem acessar ou atualizar entidades baseadas em chain, mas devem atualizar entidades específicas a arquivos. 
- -Isto pode implicar separar campos de entidades existentes em entidades separadas, ligadas juntas. - -Entidade combinada original: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Entidade nova, separada: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Se o relacionamento for perfeitamente proporcional entre a entidade parente e a entidade de fontes de dados de arquivos resultante, é mais simples ligar a entidade parente a uma entidade de arquivos resultante, com a CID IPFS como o assunto de busca. Se tiver dificuldades em modelar suas novas entidades baseadas em arquivos, pergunte no Discord! - -> É necessário usar [filtros ninhados](/querying/graphql-api/#example-for-nested-entity-filtering) para filtrar entidades parentes na base destas entidades ninhadas. - -#### Adicione um novo modelo de fonte de dados com `kind: file/ipfs` ou `kind: file/arweave` - -Esta é a fonte de dados que será gerada quando um arquivo de interesse for identificado. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Atualmente é obrigatório usar `abis`, mas não é possível chamar contratos de dentro de fontes de dados de arquivos - -A fonte de dados de arquivos deve mencionar especificamente todos os tipos de entidades com os quais ela interagirá sob `entities`. Veja [as limitações](#limitations) para mais detalhes. 
- -#### Criar um novo handler para processar arquivos - -Este handler deve aceitar um parâmetro `Bytes`, que consistirá dos conteúdos do arquivo; quando encontrado, este poderá ser acessado. Isto costuma ser um arquivo JSON, que pode ser processado com helpers `graph-ts` ([documentação](/developing/graph-ts/api/#json-api)). - -A CID do arquivo como um string legível pode ser acessada através do `dataSource` a seguir: - -```typescript -const cid = dataSource.stringParam() -``` - -Exemplo de handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Gerar fontes de dados de arquivos quando for obrigatório - -Agora pode criar fontes de dados de arquivos durante a execução de handlers baseados em chain: - -- Importe o modelo do `templates` autogerado -- chame o `TemplateName.create(cid: string)` de dentro de um mapeamento, onde o cid é um identificador de conteúdo válido para IPFS ou Arweave - -Para o IPFS, o Graph Node apoia [identificadores de conteúdo v0 e v1](https://docs.ipfs.tech/concepts/content-addressing/) e identificadores com diretórios (por ex. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
- -Para o Arweave, desde a versão 0.33.0, o Graph Node pode resgatar arquivos armazenados no Arweave com base na sua [ID de transação](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) de um gateway do Arweave ([exemplo de arquivo](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). O Arweave apoia transações enviadas via Irys (antigo Bundlr), e o Graph Node também pode resgatar arquivos com base em [manifests do Irys](https://docs.irys.xyz/overview/gateways#indexing). - -Exemplo: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Este exemplo de código é para um subgraph do Crypto Coven. O hash ipfs acima é um diretório com metadados de tokens para todos os NFTs do Crypto Coven. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //Isto cria um caminho aos metadados para um único NFT do Crypto Coven. Ele concatena o diretório com "/" + nome do arquivo + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Isto criará uma fonte de dados de arquivos, que avaliará o endpoint de IPFS ou Arweave configurado do Graph Node, e tentará novamente caso não achá-lo. Com o arquivo localizado, o handler da fonte de dados de arquivos será executado. - -Este exemplo usa a CID como a consulta entre a entidade parente `Token` e a entidade `TokenMetadata` resultante. 
- -> Anteriormente, este era o ponto em que qual um programador de subgraph teria chamado o `ipfs.cat(CID)` para resgatar o arquivo - -Parabéns, você está a usar fontes de dados de arquivos! - -#### Como lançar os seus Subgraphs - -Agora, pode construir (`build`) e lançar (`deploy`) seu subgraph a qualquer Graph Node >=v0.30.0-rc.0. - -#### Limitações - -Handlers e entidades de fontes de dados de arquivos são isolados de outras entidades de subgraph, o que garante que sejam determinísticos quando executados e que não haja contaminação de fontes de dados baseadas em chain. Especificamente: - -- Entidades criadas por Fontes de Dados de Arquivos são imutáveis, e não podem ser atualizadas -- Handlers de Fontes de Dados de Arquivos não podem acessar entidades de outras fontes de dados de arquivos -- Entidades associadas com Fontes de Dados de Arquivos não podem ser acessadas por handlers baseados em chain - -> Enquanto esta limitação pode não ser problemática para a maioria dos casos de uso, ela pode deixar alguns mais complexos. Se houver qualquer problema neste processo, por favor dê um alô via Discord! - -Além disto, não é possível criar fontes de dados de uma fonte de dado de arquivos, seja uma on-chain ou outra fonte de dados de arquivos. Esta restrição poderá ser retirada no futuro. - -#### Boas práticas - -Caso ligue metadados de NFTs a tokens correspondentes, use o hash IPFS destes para referenciar uma entidade de Metadados da entidade do Token. Salve a entidade de Metadados a usar o hash IPFS como ID. - -É possível usar o [contexto DataSource](/developing/graph-ts/api/#entity-and-datasourcecontext) ao criar Fontes de Dados de Arquivos para passar informações extras, que estarão disponíveis ao handler de Fontes de Dados de Arquivos. - -Caso tenha entidades a ser atualizadas várias vezes, crie entidades únicas baseadas em arquivos utilizando o hash IPFS & o ID da entidade, e as referencie com um campo derivado na entidade baseada na chain. 
- -> Estamos a melhorar a recomendação acima, para que os queries retornem apenas a versão "mais recente" - -#### Problemas conhecidos - -Atualmente, fontes de dados de arquivos requerem ABIs, apesar destas não serem usadas ([problema no GitHub](https://github.com/graphprotocol/graph-cli/issues/961)). A solução é adicionar qualquer ABI. - -Handlers para Fontes de Dados de Arquvios não podem estar em arquivos que importam ligações de contrato `eth_call`, o que causa falhas com "unknown import: `ethereum::ethereum.call` has not been defined" ([problema no GitHub](https://github.com/graphprotocol/graph-node/issues/4309)). A solução é criar handlers de fontes de dados de arquivos num arquivo dedicado. - -#### Exemplos - -[Migração de subgraph do Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Referências - -[Fontes de Dados de Arquivos GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/pt/developing/creating-a-subgraph/_meta.js b/website/pages/pt/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/pt/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pt/developing/graph-ts/_meta.js b/website/pages/pt/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/pt/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/pt/managing/deprecate-a-subgraph.mdx b/website/pages/pt/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/pt/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a 
Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/pt/mips-faqs.mdx b/website/pages/pt/mips-faqs.mdx deleted file mode 100644 index 1408b61422ac..000000000000 --- a/website/pages/pt/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Perguntas frequentes sobre Provedores de Infraestrutura de Migração (MIPs) ---- - -## Introdução - -> Nota: O programa de MIPs fechou em maio de 2023. Agradecemos a todos os Indexadores que participaram! - -É uma boa época para participar do ecossistema do The Graph! Durante o [Graph Day 2022](https://thegraph.com/graph-day/2022/), Yaniv Tal anunciou a [aposentadoria do serviço hospedado](https://thegraph.com/blog/sunsetting-hosted-service/), um momento para o qual o ecossistema do The Graph se preparou por muitos anos. - -Para apoiar o desligamento do serviço hospedado e a migração de toda a sua atividade à rede descentralizada, a Graph Foundation anunciou o [programa de Provedores de Infraestrutura de Migração (MIPs)](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
- -O programa de MIPs é um programa de incentivos para Indexadores, para apoiá-los com recursos para indexar chains além da mainnet Ethereum e ajudar o protocolo The Graph a expandir a rede descentralizada numa camada de infraestrutura multi-chain. - -O programa de MIPs alocou 0,75% da reserva de GRT (75 milhões de GRT), com 0.5% reservados para recompensar Indexadores que contribuam à inicialização da rede e 0.25% alocados a bolsas de rede para programadores de subgraph a usar subgraphs multi-chain. - -### Recursos Úteis - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. É possível gerar uma Prova de Indexação (POI) válida mesmo se um subgraph falhar? - -Sim, é possível. - -Para mais contexto, a carta de arbitragem — [mais sobre ela aqui](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) — especifica a metodologia de gerar uma POI para um subgraph falho. - -Um membro da comunidade, [SunTzu](https://github.com/suntzu93), criou um script para automatizar este processo conforme a metodologia da carta de arbitragem. Confira o repo [aqui](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Qual chain será incentivada primeiro pelo programa de MIPs? - -A primeira chain a ser apoiada na rede descentralizada é a Gnosis Chain! Antigamente conhecida como xDAI, a Gnosis Chain é baseada em EVM. Ela é a primeira por conta da sua facilidade em executar nodes; sua prontidão para Indexadores; alinhamento com o The Graph; e a sua adoção dentro da web3. - -### 3. Como serão adicionadas novas chains ao programa de MIPs? 
- -Novas chains serão anunciadas durante o programa de MIPs — com base na prontidão aos Indexadores, na demanda, e no sentimento da comunidade. Chains serão apoiadas primeiro na testnet, e depois, uma GIP será passada para apoiar aquela chain na mainnet. Os Indexadores participantes escolherão quais chains eles têm interesse em apoiar e serão recompensados por chain; também ganharão taxas de query e recompensas de indexação na rede por servir subgraphs. Participantes no MIPs serão pontuados com base em seu desempenho, habilidade de servir às necessidades da rede, e apoio da comunidade. - -### 4. Como saberemos quando a rede estará pronta para uma nova chain? - -A Graph Foundation monitorizará as métricas de desempenho de qualidade de serviço, o desempenho da rede, e os canais comunitários para melhor avaliar a prontidão. A prioridade é garantir que a rede atenda às necessidades da comunidade para que estes dApps multi-chain possam migrar os seus subgraphs. - -### 5. Como as recompensas são divididas por chain? - -Sabendo que chains variam em seus requisitos para a sincronização de nodes, e que diferem em volume de queries e adoção, as recompensas por chain serão decididas no fim do ciclo dessa chain para garantir o registo de todo o feedback e aprendizado. Porém, a toda hora, os Indexadores também poderão ganhar taxas de query e recompensas de indexação quando a chain for apoiada na rede. - -### 6. Nós precisamos indexar todas as redes no programa de MIPs ou podemos só escolher uma chain e indexar esta? - -Podes indexar quais chains quiser! A meta do programa de MIPs é equipar os Indexadores com as ferramentas e conhecimento para indexar as chains que desejam e apoiar os ecossistemas web3 nos quais têm interesse. Porém, para cada chain, há fases da testnet à mainnet. Complete todas as fases para as chains que indexa. Aprenda mais sobre as fases na [página do Notion dos MIPs](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059). 
- -### 7. Quando as recompensas serão distribuídas? - -As recompensas para MIPs serão distribuídas por chain quando as métricas de desempenho forem alcançadas e os subgraphs migrados forem apoiados por esses Indexadores. Fique atento a informações sobre as recompensas totais por chain na metade do ciclo daquela chain. - -### 8. Como a pontuação funciona? - -Os indexadores competirão por recompensas baseadas na sua classificação de pontos do programa. A pontuação será baseada em: - -**Cobertura de Subgraphs** - -- Está a providenciar o apoio máximo para subgraphs por chain? - -- Durante os MIPs, espera-se que grandes Indexadores façam staking de 50% ou mais por chain apoiada. - -**Qualidade de Serviço** - -- O Indexador serve uma boa Qualidade de Serviço à chain (latência, dados novos, uptime, etc.)? - -- O Indexador que apoia programadores de dApp é reativo às necessidades deles? - -O Indexador aloca com eficiência, para contribuir à saúde geral da rede? - -**Suporte da Comunidade** - -- O Indexador colabora com os seus colegas para ajudá-los a se preparar para a multi-chain? - -- O Indexador fornece opiniões para programadores importantes no programa, ou compartilha informações com Indexadores no Fórum? - -### 9. Como o papel no Discord será atribuído? - -Os moderadores darão os papeis nos dias a seguir. - -### 10. Vale começar o programa em uma testnet e depois trocar para a Mainnet? Poderão identificar o meu node e levá-lo em conta enquanto distribuem recompensas? - -Sim, isto é esperado. Várias fases são no Görli, e uma é na mainnet. - -### 11. Em qual ponto espera-se que os participantes adicionem um lançamento na mainnet? - -Será obrigatório ter um indexador da mainnet durante a terceira fase. Mais informações [nesta página do Notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. As recompensas serão sujeitas ao vesting? - -A porção a ser distribuída no fim do programa será sujeita ao vesting. 
Falaremos mais sobre isto no Acordo do Indexador. - -### 13. Em equipas com mais de um membro, todos os membros da equipa receberão um papel de MIPs no Discord? - -Sim - -### 14. Dá para usar os tokens trancados no programa de curadoria do The Graph para participar na testnet de MIPs? - -Sim - -### 15. Durante o programa de MIPs, haverá um período para disputar POIs inválidas? - -Isto ainda será decidido. Fique atento a esta página para mais detalhes; se o seu pedido for urgente, mande uma mensagem para info@thegraph.foundation - -### 17. Podemos combinar dois contratos de vesting? - -Não. As opções são: delegar um para o outro, ou executar dois indexadores diferentes. - -### 18. Perguntas de KYC (Conheça o seu Cliente)? - -Mande uma mensagem para info@thegraph.foundation - -### 19. Ainda não estou pronto para indexar a Gnosis Chain, posso começar a indexar de outra chain quando estiver pronto? - -Sim - -### 20. Há regiões recomendadas para executar os servidores? - -Não damos recomendações sobre regiões. Ao escolher locais, pense sobre onde ficam os maiores mercados para criptomoedas. - -### 21. O que é "custo de gas de handler"? - -É a medida determinística do custo de executar um handler. Ao contrário do que diz o nome, isto não tem a ver com o custo de gas em blockchains. 
diff --git a/website/pages/pt/querying/_meta.js b/website/pages/pt/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/pt/querying/_meta.js +++ b/website/pages/pt/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/pt/querying/graph-client/_meta.js b/website/pages/pt/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/pt/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ro/_meta.js b/website/pages/ro/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/ro/_meta.js +++ b/website/pages/ro/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/ro/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ro/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/ro/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. 
You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). 
- -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. 
- -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." 
- } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. 
- -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. 
diff --git a/website/pages/ro/deploying/hosted-service.mdx b/website/pages/ro/deploying/hosted-service.mdx deleted file mode 100644 index 1242d8fec248..000000000000 --- a/website/pages/ro/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Creează un Subgraf - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). 
diff --git a/website/pages/ro/deploying/subgraph-studio.mdx b/website/pages/ro/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/ro/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. - -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. 
- -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/ro/developing/creating-a-subgraph.mdx b/website/pages/ro/developing/creating-a-subgraph.mdx deleted file mode 100644 index e38d897919f8..000000000000 --- a/website/pages/ro/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. - -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from Ethereum. |
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every EVM network.
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ro/developing/creating-a-subgraph/_meta.js b/website/pages/ro/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ro/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ro/developing/graph-ts/_meta.js b/website/pages/ro/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ro/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ro/managing/deprecate-a-subgraph.mdx b/website/pages/ro/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ro/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ro/mips-faqs.mdx b/website/pages/ro/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/ro/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/ro/querying/_meta.js b/website/pages/ro/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ro/querying/_meta.js +++ b/website/pages/ro/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ro/querying/graph-client/_meta.js b/website/pages/ro/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ro/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ru/_meta.js b/website/pages/ru/_meta.js index 2aa3c4a9b466..f2f3b56163a5 100644 --- a/website/pages/ru/_meta.js +++ b/website/pages/ru/_meta.js @@ -1,6 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - tokenomics: 'Токеномика', + ...meta, } diff --git a/website/pages/ru/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ru/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 1a425a43f781..000000000000 --- 
a/website/pages/ru/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,293 +0,0 @@ ---- -title: Развертывание подграфа Arweave в Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Хранение идентификатора доступа - -После создания учетной записи перейдите к своему [дашборду](https://thegraph.com/hosted-service/dashboard). Скопируйте идентификатор доступа, отображаемый на панели, и запустите `graph auth --product hosted-service `. Это сохранит идентификатор на вашем компьютере. Вам нужно сделать это только один раз, или если вы когда-либо повторно создадите идентификатор доступа. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Выберите изображение, которое будет использоваться в качестве изображения предварительного просмотра и миниатюры для подграфа. 
- -**Subgraph Name** - Вместе с именем учетной записи, под которым создается подграф, это также определит `account-name/subgraph-name`-имя стиля, используемого для развертываний и эндпоинтов GraphQL. _Это поле не может быть изменено позже._ - -** Account ** - учетная запись, под которой создается подграф. Это может быть учетная запись физического лица или организации. _Подграфы позже нельзя будет перемещать между учетными записями._ - -**Subtitle** - текст, который будет отображаться в виде карт подграфа. - -** Description ** - Описание подграфа, видимое на странице сведений о подграфе. - -**URL GitHub** - ссылка на subgraph репозиторий на GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -Для развертывания подграфа выполните команду `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -Статус подграфа переключается на `Synced`, как только Graph Node извлечет все данные из исторических блоков. The Graph Node будет продолжать проверять блоки для вашего подграфа по мере добывания этих блоков. 
- -## Повторное развертывание подграфа - -При внесении изменений в определение подграфа, например, для устранения проблемы в сопоставлении объектов, выполните команду `yarn deploy` выше, чтобы снова установить обновленную версию подграфа. Любое обновление подграфа требует, чтобы Graph Node заново проиндексировал весь ваш подграф, снова начиная с genesis блока. - -Если ваш ранее развернутый подграф все еще находится в статусе `Syncing`, он будет немедленно заменен на недавно развернутую версию. Если ранее развернутый подграф уже полностью синхронизирован, нода Graph пометит вновь развернутую версию как `Pending Version`, синхронизирует ее в фоновом режиме и заменит текущую развернутую версию новой только после завершения синхронизации новой версии. Это гарантирует, что у вас есть подграф для работы во время синхронизации новой версии. - -## Развертывание подграфа в нескольких сетях - -В некоторых случаях вы захотите развернуть один и тот же подграф в нескольких сетях, не дублируя весь его код. Основная проблема, возникающая при этом, заключается в том, что адреса контрактов в этих сетях разные. - -### Использование graph-cli - -Как `graph build` (начиная с `v0.29.0`), так и `graph deploy` (начиная с `v0.32.0`) допускают две новые опции: - -```sh - ... - --network Конфигурация сети для использования из файла конфигурации сетей - --network-file Путь к файлу конфигурации сетей (по умолчанию: "./networks.json") -``` - -Вы можете использовать опцию `--network`, чтобы указать конфигурацию сети из стандартного файла `json` (по умолчанию используется `networks.json`), чтобы легко обновлять свой подграф во время разработки. - -**Примечание:** Команда `init` теперь автоматически сгенерирует `networks.json` на основе предоставленной информации. Затем вы сможете обновить существующие или добавить дополнительные сети. 
- -Если у вас нет файла `networks.json`, вам нужно будет вручную создать его со следующей структурой: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Примечание:** Вам не нужно указывать ни один из `templates` (если они у вас есть) в файле конфигурации, только `dataSources`. Если есть какие-либо `templates`, объявленные в файле `subgraph.yaml`, их сеть будет автоматически обновлена до указанной с помощью опции `--network`. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Вот как должен выглядеть файл конфигурации ваших сетей: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Теперь мы можем запустить одну из следующих команд: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Теперь вы готовы к `yarn deploy`. 
- -**Примечание:** Как упоминалось ранее, поскольку `graph-cli 0.32.0` вы можете напрямую запустить `yarn deploy` с опцией `--network`: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Использование шаблона subgraph.yaml - -Одним из решений для старых версий graph-cli, которое позволяет параметризовать такие аспекты, как адреса контрактов, является генерация его частей с использованием системы шаблонов, такой как [Mustache](https://mustache.github.io/) или [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -и - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Наряду с этим, необходимо заменить имя сети и адреса в манифесте на variable placeholders `{{network}}` и `{{address}}` и переименовать манифест, например, `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Чтобы сформировать манифест для любой сети, вы можете добавить две дополнительные команды в `package.json` вместе с dependency от `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Рабочий пример этого можно найти [здесь](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Примечание:** Этот подход также может быть применен к более сложным ситуациям, где необходимо заменить больше, чем адреса контрактов и сетевые имена, или где также генерируются сопоставления или ABI из шаблонов. - -## Проверка работоспособности подграфа - -Если подграф успешно синхронизируется, это хороший признак того, что он будет работать надёжно. Однако новые триггеры в сети могут привести к тому, что ваш подграф попадет в состояние непроверенной ошибки, или он может начать отставать из-за проблем с производительностью или проблем с операторами нод. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Это даст вам `chainHeadBlock`, который вы можете сравнить с `latestBlock` на вашем подграфе, чтобы проверить, отстает ли он. `synced` сообщает, попадал ли когда-либо подграф в сеть. 
`health` в настоящее время может принимать значения `healthy`, если ошибок не произошло, или `failed`, если произошла ошибка, которая остановила работу подграфа. В этом случае вы можете проверить поле `FatalError` для получения подробной информации об этой ошибке. - -## Политика архивирования подграфов в Hosted Service - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Политика архивирования подграфов в Subgraph Studio - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -У каждого подграфа, затронутого этой политикой, есть возможность вернуть соответствующую версию обратно. 
diff --git a/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index e4be6b26f36d..000000000000 --- a/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Развертывание подграфа в Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Установите The Graph CLI (либо с помощью yarn, либо с npm) -- Создайте ваш подграф в Subgraph Studio -- Аутентифицируйте свою учетную запись с помощью CLI -- Развертывание подграфа в Subgraph Studio - -## Установка Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Установка с помощью yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Установка с помощью npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Создайте ваш подграф в Subgraph Studio - -Перед развертыванием вашего фактического подграфа вам необходимо создать подграф в [Subgraph Studio](https://thegraph.com/studio/). Мы рекомендуем вам ознакомиться с нашей [документацией](/deploying/subgraph-studio), чтобы узнать больше об этом. - -## Инициализация вашего подграфа - -Как только ваш подграф будет создан в Subgraph Studio, вы можете инициализировать код подграфа с помощью этой команды: - -```bash -graph init --studio -``` - -Значение `` можно найти на странице сведений о вашем подграфе в Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -После запуска `graph init` вам будет предложено ввести адрес контракта, сеть и ABI, которые вы хотите запросить. Выполнение этого приведет к созданию новой папки на вашем локальном компьютере с некоторым базовым кодом для начала работы над вашим подграфом. Затем вы можете доработать свой подграф, чтобы убедиться, что он работает должным образом. 
- -## Аутентификация в Graph - -Прежде чем вы сможете развернуть свой подграф в Subgraph Studio, вам необходимо войти в свою учетную запись в CLI. Для этого вам понадобится ваш ключ, который вы можете найти на своей странице "My Subgraphs" или на странице сведений о вашем подграфе. - -Вот команда, которую вам нужно использовать для аутентификации из CLI: - -```bash -graph auth --studio -``` - -## Развертывание подграфа в Subgraph Studio - -Как только вы будете готовы, вы можете развернуть свой подграф в Subgraph Studio. Это не приведет к публикации вашего подграфа в децентрализованной сети, а только к его развертыванию в вашей учетной записи Studio, где вы сможете протестировать его и обновить метаданные. - -Вот команда CLI, которую вам нужно использовать для развертывания вашего подграфа. - -```bash -graph deploy --studio -``` - -После выполнения этой команды CLI запросит обозначение версии, вы можете назвать ее так, как хотите, вы можете использовать такие, как `0.1` и `0.2` или также использовать буквы, такие как `uniswap-v2-0.1`. Обозначение будут видны в Graph Explorer и могут быть использованы кураторами, чтобы решить, хотят ли они подавать сигнал на эту версию или нет, поэтому выбирайте их с умом. - -После развертывания вы можете протестировать свой подграф в Subgraph Studio с помощью тестовой площадки, при необходимости развернуть другую версию, обновить метаданные и, когда будете готовы, опубликовать свой подграф в Graph Explorer. diff --git a/website/pages/ru/deploying/hosted-service.mdx b/website/pages/ru/deploying/hosted-service.mdx deleted file mode 100644 index 6a241099f9c0..000000000000 --- a/website/pages/ru/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Что такое Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. 
Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Создать подграф - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### Из существующего контракта - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -В данном случае `` - это имя вашего пользователя или организации на GitHub, `` - имя вашего подграфа, а `` - необязательное имя каталога, куда `graph init` поместит пример манифеста подграфа. `` - это адрес вашего существующего контракта. `` - это имя сети, в которой находится контракт. `` - это локальный путь к ABI-файлу контракта. 
** И `--network`, и `--abi` являются необязательными.** - -### Из примера подграфа - -Второй режим, который поддерживает `graph init`, - это создание нового проекта из примера подграфа. Следующая команда делает это: - -``` -graph init --from-example --product hosted-service / [] -``` - -Пример подграфа основан на контракте Gravity от Dani Grant, который управляет пользовательскими аватарами и выдает события `NewGravatar` или `UpdateGravatar` всякий раз, когда создаются или обновляются аватары. Подграф обрабатывает эти события, записывая объекты `Gravatar` в хранилище нод Graph и обеспечивая их обновление в соответствии с событиями. Перейдите к [манифесту подграфа](/developing/creating-a-subgraph#the-subgraph-manifest), чтобы лучше понять, на какие события из ваших смарт-контрактов следует обратить внимание, маппинг и многое другое. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -Вы можете найти список поддерживаемых сетей [здесь](/developing/supported-networks). diff --git a/website/pages/ru/deploying/subgraph-studio.mdx b/website/pages/ru/deploying/subgraph-studio.mdx deleted file mode 100644 index 0b912f25c884..000000000000 --- a/website/pages/ru/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Добро пожаловать на вашу новую пусковую площадку 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Создать подграф с помощью Studio UI -- Развернуть подграф с помощью CLI -- Опубликовать подграф с помощью Studio UI -- Протестировать это на тестовой площадке -- Интегрировать его в промежуточный этап, используя URL-адрес запроса -- Создание и управление ключами API для определенных подграфов - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Запросы к подграфам генерируют плату за запросы, которая используется для вознаграждения [индексаторов](/network/indexing) в сети Graph. Если вы являетесь разработчиком dapp или разработчиком подграфов, Studio поможет вам создать лучшие подграфы для обеспечения запросов для вас или вашего сообщества. Studio состоит из 5 основных частей: - -- Элементы управления вашей учетной записью пользователя -- Список созданных вами подграфов -- Раздел для управления, просмотра подробностей и визуализации статуса конкретного подграфа -- Раздел для управления API ключами, которые понадобятся для выполнения запросов к подграфу -- Раздел для управления биллингом - -## Как создать свою учетную запись - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Как только вы войдете в систему, вы увидите свой уникальный ключ на домашней странице вашей учетной записи. Это позволит вам либо публиковать свои подграфы, либо управлять ключами API + биллингом. У вас будет уникальный ключ, который можно восстановить, если вы считаете, что он был скомпрометирован. 
- -## How to Create a Subgraph in Subgraph Studio - - - -## Совместимость подграфов с сетью Graph - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Индексировать [поддерживаемую сеть](/developing/supported-networks) -- Не должны использовать ни одну из следующих функций: - - ipfs.cat & ipfs.map - - Неисправимые ошибки - - Grafting - -Дополнительные функции и сети будут добавляться в сеть The Graph постепенно. - -### Жизненный цикл подграфа - -![Жизненный цикл подграфа](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Тестирование вашего подграфа в Subgraph Studio - -Если вы хотите протестировать ваш подграф перед публикацией в сети, вы можете сделать это в ** Subgraph Playground** или просмотреть свои логи. Логи подграфа сообщат вам, **где** ваш подграф терпит неудачу в том случае, если это произойдет. - -## Публикация вашего подграфа в Subgraph Studio - -Вы зашли так далеко - поздравляю! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Ознакомьтесь также с видео обзором ниже: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Индексаторы должны предоставить Proof of Indexing записей по определенному хэшу блока. Поскольку публикация подграфа - это действие, выполняемое внутри сети, помните, что транзакция может занять до нескольких минут. Любой адрес, который вы используете для публикации контракта, будет единственным, способным публиковать будущие версии. Выбирайте с умом! - -Подграфы с кураторским сигналом показываются индексаторам, чтобы они могли быть проиндексированы в децентрализованной сети. Вы можете публиковать подграфы и сигнал в одной транзакции, что позволяет вам минтить первый сигнал кураторства на подграф и экономить на стоимости газа. Добавляя свой сигнал к сигналам, позже предоставленным кураторами, ваш подграф также будет иметь более высокие шансы на конечное обслуживание запросов. - -**Теперь, когда вы опубликовали свой подграф, давайте разберемся, как управлять им на регулярной основе.** Обратите внимание, что вы не можете опубликовать свой подграф в сеть, если он не прошел синхронизацию. Обычно это происходит потому, что в подграфе есть ошибки - журналы покажут вам, где эти проблемы имеются! - -## Управление версиями вашего подграфа с помощью CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. 
This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Обратите внимание, что публикация новой версии подграфа в сети связана с определенными расходами. Помимо платы за транзакции, разработчики должны также оплачивать часть сбора за кураторство по сигналу автоматической миграции. Вы не можете опубликовать новую версию своего подграфа, если кураторы не подали на него сигнал. Подробнее о рисках кураторства читайте [здесь](/network/curating). - -### Автоматическое архивирование версий подграфа - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. 
- -![Subgraph Studio - Разархивирование](/img/Unarchive.png) diff --git a/website/pages/ru/developing/creating-a-subgraph.mdx b/website/pages/ru/developing/creating-a-subgraph.mdx deleted file mode 100644 index 977669732774..000000000000 --- a/website/pages/ru/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Создание субграфа ---- - -Субграф извлекает данные из блокчейна, обрабатывает их и сохраняет таким образом, чтобы их можно было легко запросить с помощью GraphQL. - -![Определение субграфа](/img/defining-a-subgraph.png) - -Определение субграфа состоит из нескольких файлов: - -- `subgraph.yaml`: файл YAML, содержащий манифест субграфа - -- `schema.graphql`: схема GraphQL, которая определяет, какие данные хранятся для Вашего субграфа и как их запрашивать через GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript), который преобразует данные события в объекты, определенные в Вашей схеме (например, `mapping.ts` в этом руководстве) - -> Чтобы использовать свой субграф в децентрализованной сети The Graph, Вам необходимо [создать ключ API](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). Рекомендуется [добавить сигнал](/network/curating/#how-to-signal) в свой субграф как минимум с [3000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Прежде чем Вы перейдете к подробному описанию содержимого файла манифеста, Вам необходимо установить[Graph CLI](https://github.com/graphprotocol/graph-tooling), который понадобится для создания и развертывания субграфа. - -## Установка Graph CLI - -The Graph CLI написан на JavaScript, и для его использования необходимо установить либо `yarn`, либо `npm`; в дальнейшем предполагается, что у Вас есть yarn. 
- -Получив `yarn`, установите Graph CLI, запустив следующие команды - -**Установка с помощью yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Установка с помощью npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -После установки команду `graph init` можно использовать для настройки нового проекта субграфа либо из существующего контракта, либо из примера субграфа. Эту команду можно использовать для создания субграфа в Subgraph Studio, передав в `graph init --product subgraph-studio`. Если у Вас уже есть смарт-контракт, развернутый в выбранной Вами сети, загрузка нового субграфа из этого контракта может быть хорошим способом начать работу. - -## Из существующего контракта - -Следующая команда создает субграф, который индексирует все события существующего контракта. Он пытается получить ABI контракта из Etherscan и возвращается к запросу пути к локальному файлу. Если какой-либо из необязательных аргументов отсутствует, он проведет Вас через интерактивную форму. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` - это идентификатор Вашего субграфа в Subgraph Studio, его можно найти на странице сведений о субграфе. - -## Из примера субграфа - -Второй режим, который поддерживает `graph init`, - это создание нового проекта из примера субграфа. Это делает следующая команда: - -```sh -graph init --studio -``` - -[Пример субграфа](https://github.com/graphprotocol/example-subgraph) основан на контракте Gravity Дэни Гранта, который управляет пользовательскими аватарами и генерирует события `NewGravatar` или `UpdateGravatar` при создании или обновлении аватаров. Субграф обрабатывает эти события, записывая объекты `Gravatar` в хранилище Graph Node и обеспечивая их обновление в соответствии с событиями. В следующих разделах будут рассмотрены файлы, составляющие манифест субграфа для этого примера. 
- -## Добавление новых источников данных к существующему субграфу - -Начиная с `v0.31.0` `graph-cli` поддерживает добавление новых источников данных к существующему субграфу с помощью команды `graph add`. - -```sh -graph add
[] - -Опции: - - --abi Путь к контракту ABI (default: download from Etherscan) - --contract-name Имя контракта (default: Contract) - --merge-entities Следует ли объединять объекты с одинаковым именем (default: false) - --network-file Путь к файлу конфигурации сети (default: "./networks.json") -``` - -Команда `add` извлечёт ABI из Etherscan (если путь к ABI не указан с помощью опции `--abi`) и создаст новый `dataSource` таким же образом, как `graph init` создает `dataSource` `--from-contract`, соответствующим образом обновляя схему и мэппинги. - -Параметр `--merge-entities` определяет, как разработчик хотел бы обрабатывать конфликты имен `entity` и `event`: - -- Если `true`: новый `dataSource` должен использовать существующие ` eventHandlers` & `entities`. -- Если `false`: следует создать новую сущность и обработчик событий с помощью `${dataSourceName}{EventName}`. - -Контракт `address` будет записан в `networks.json` для соответствующей сети. - -> **Примечание:** При использовании интерактивного интерфейса командной строки после успешного запуска `graph init` Вам будет предложено добавить новый `dataSource`. - -## Манифест субграфа - -Манифест субграфа `subgraph.yaml` определяет смарт-контракты, которые индексирует Ваш субграф, на какие события из этих контрактов следует обращать внимание и как сопоставлять данные событий с объектами, которые хранит и позволяет запрашивать Graph Node. Полную спецификацию манифестов субграфов можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Для примера субграфа `subgraph.yaml`: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Важными элементами манифеста, которые необходимо обновить, являются: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: понятное описание того, что представляет собой субграф. Это описание отображается в Graph Explorer при развертывании субграфа в Subgraph Studio. - -- `repository`: URL-адрес репозитория, в котором можно найти манифест субграфа. Это также отображается в The Graph Explorer. - -- `features`: список всех используемых имен [функций](#experimental-features). - -- `indexerHints.prune`: определяет срок хранения исторических данных блока для субграфа. См. [сокращение](#prune) в разделе [indexerHints](#indexer-hints). 
- -- `dataSources.source`: адрес смарт-контракта, источники субграфа и ABI смарт-контракта для использования. Адрес необязателен; отсутствие этого параметра позволяет индексировать совпадающие события из всех контрактов. - -- `dataSources.source.startBlock`: необязательный номер блока, с которого источник данных начинает индексацию. В большинстве случаев мы предлагаем использовать блок, в котором был создан контракт. - -- `dataSources.source.endBlock`: необязательный номер блока, индексирование которого прекращается источником данных, включая этот блок. Минимальная требуемая версия спецификации: `0.0.9`. - -- `dataSources.context`: пары «ключ-значение», которые можно использовать внутри мэппингов субграфов. Поддерживает различные типы данных, такие как `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` и `BigInt`. Для каждой переменной нужно указать ее `type` и `data`. Эти контекстные переменные затем становятся доступными в файлах мэппинга, предлагая больше настраиваемых параметров для разработки субграфов. - -- `dataSources.mapping.entities`: объекты, которые источник данных записывает в хранилище. Схема для каждого объекта определена в файле schema.graphql. - -- `dataSources.mapping.abis`: один или несколько именованных файлов ABI для исходного контракта, а также любых других смарт-контрактов, с которыми Вы взаимодействуете из мэппингов. - -- `DataSources.mapping.EventHandlers`: перечисляет события смарт—контракта, на которые реагирует этот субграф, и обработчики в мэппинге —./src/mapping.ts в примере - которые преобразуют эти события в объекты в хранилище. - -- `DataSources.mapping.callHandlers`: перечисляет функции смарт-контракта, на которые реагирует этот субграф, и обработчики в мэппинге, которые преобразуют входные и выходные данные для вызовов функций в объекты в хранилище. - -- `dataSources.mapping.blockHandlers`: перечисляет блоки, на которые реагирует этот субграф, и обработчики в мэппинг, которые запускаются при добавлении блока в чейн. 
Без фильтра обработчик блока будет запускаться для каждого блока. Дополнительный фильтр вызовов может быть предоставлен путем добавления в обработчик поля `filter` с `kind: call `. Обработчик будет запущен только в том случае, если блок содержит хотя бы один вызов контракта источника данных. - -Один субграф может индексировать данные из нескольких смарт-контрактов. Добавьте в массив `dataSources` запись для каждого контракта, данные которого нужно проиндексировать. - -### Порядок запуска обработчиков - -Триггеры для источника данных внутри блока упорядочиваются с помощью следующего процесса: - -1. Триггеры событий и вызовов сначала упорядочиваются по индексу транзакции внутри блока. -2. Триггеры событий и вызовов в рамках одной транзакции упорядочиваются по следующему принципу: сначала триггеры событий, затем триггеры вызовов, причем для каждого типа соблюдается тот порядок, в котором они определены в манифесте. -3. Триггеры блоков запускаются после триггеров событий и вызовов в том порядке, в котором они определены в манифесте. - -Эти правила оформления заказа могут быть изменены. - -> **Примечание:** При создании нового [динамического источника данных](#data-source-templates-for-dynamically-created-contracts) обработчики, определенные для динамических источников данных, начнут обработку только после обработки всех существующих обработчиков источников данных и будут повторяться в той же последовательности при каждом запуске. - -### Фильтры индексированных аргументов/фильтры тем - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. 
- -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### Как работают фильтры тем - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// Идентификатор лицензии SPDX: MIT -pragma solidity ^0.8.0; - -contract Token { - // Объявление события с индексируемыми параметрами для адресов - event Transfer(address indexed from, address indexed to, uint256 value); - - // Функция для имитации передачи токенов - function transfer(address to, uint256 value) public { - // Генерация события Transfer с указанием from, to и value - emit Transfer(msg.sender, to, value); - } -} -``` - -В этом примере: - -- Событие `Transfer` используется для протоколирования транзакций токенов между адресами. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- Функция `transfer` — это простое представление действия передачи токена, которое генерирует событие Transfer при каждом вызове. - -#### Конфигурация в субграфах - -Фильтры тем определяются непосредственно в конфигурации обработчика событий в манифесте субграфа. Вот как они настроены: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -В этой настройке: - -- `topic1` соответствует первому индексированному аргументу события, `topic2` — второму, а `topic3` — третьему. 
-- Каждая тема может иметь одно или несколько значений, и событие обрабатывается только в том случае, если оно соответствует одному из значений в каждой указанной теме. - -##### Логика фильтра - -- В рамках одной темы: логика действует как условие OR. Событие будет обработано, если оно соответствует любому из перечисленных значений в данной теме. -- Между разными темами: логика функционирует как условие AND. Событие должно удовлетворять всем указанным условиям в разных темах, чтобы вызвать соответствующий обработчик. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -В данной конфигурации: - -- `topic1` настроен на фильтрацию событий `Transfer`, где `0xAddressA` является отправителем. -- `topic2` настроен на фильтрацию событий `Transfer`, где `0xAddressB` является получателем. -- Субграф будет индексировать только транзакции, которые происходят непосредственно от `0xAddressA` к `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -В данной конфигурации: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. 
- -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. 
Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Версия | Примечания к релизу | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). 
Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Получение ABIs - -Файл(ы) ABI должен(ы) соответствовать Вашему контракту (контрактам). Существует несколько способов получения файлов ABI: - -- Если Вы создаете свой собственный проект, у Вас, скорее всего, будет доступ к наиболее актуальным ABIS. -- Если Вы создаете субграф для публичного проекта, Вы можете загрузить этот проект на свой компьютер и получить ABI, используя [`truffle compile`](https://truffleframework.com/docs/truffle/overview) или используя solc для компиляции. -- Вы также можете найти ABI на [Etherscan](https://etherscan.io/), но это не всегда надежно, так как загруженный туда ABI может быть устаревшим. Убедитесь, что у Вас есть нужный ABI, в противном случае запуск Вашего субграфа будет неудачным. - -## Схема GraphQL - -Схема для Вашего субграфа находится в файле `schema.graphql`. Схемы GraphQL определяются с использованием языка определения интерфейса GraphQL. Если Вы никогда ранее не писали схему GraphQL, рекомендуем ознакомиться с этим руководством по системе типов GraphQL. Справочную документацию по схемам GraphQL можно найти в разделе [GraphQL API](/querying/graphql-api). 
- -## Определение Объектов - -Прежде чем определять объекты, важно сделать шаг назад и подумать о том, как структурированы и связаны Ваши данные. Все запросы будут выполняться к модели данных, определенной в схеме субграфа, и объектам, проиндексированным этим субграфом. Для этого рекомендуется определить схему субграфа таким образом, чтобы она соответствовала потребностям Вашего децентрализованного приложения. Может быть полезно представить объекты как "объекты, содержащие данные", а не как события или функции. - -С помощью The Graph Вы просто определяете типы объектов в `schema.graphql`, и узел The Graph будет генерировать поля верхнего уровня для запроса отдельных экземпляров и коллекций этого типа объектов. Каждый тип, который должен быть объектом, должен быть аннотирован директивой `@entity`. По умолчанию объекты изменяемы, что означает, что мэппинги могут загружать существующие объекты, изменять их и сохранять их новую версию. Измененяемость имеет свою цену, и для типов объектов, для которых известно, что они никогда не будут изменены, например, потому что они просто содержат данные, идентично извлеченные из чейна, рекомендуется помечать их как неизменяемые с помощью `@entity(immutable: true)`. Мэппинги могут вносить изменения в неизменяемые объекты до тех пор, пока эти изменения происходят в том же блоке, в котором был создан объект. Неизменяемые объекты гораздо быстрее записываются и запрашиваются, и поэтому их следует использовать каждый раз, когда это возможно. - -### Удачный пример - -Приведенный ниже объект `Gravatar` структурирован вокруг объекта Gravatar и является хорошим примером того, как объект может быть определен. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Неудачный пример - -Приведенные ниже примеры объектов `GravatarAccepted` и `GravatarDeclined` основаны на событиях. 
Не рекомендуется сопоставлять события или вызовы функций с объектами 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Дополнительные и обязательные поля - -Поля объекта могут быть определены как обязательные или необязательные. Обязательные поля обозначены символом `!` в схеме. Если в мэппинге не задано обязательное поле, то при запросе к нему будет выдана эта ошибка: - -``` -Null value resolved for non-null field 'name' -``` - -У каждого объекта должно быть поле `id`, у которого должен быть тип `Bytes!` или `String!`. Обычно рекомендуется использовать `Bytes!`, если только `id` не содержит удобочитаемый текст, поскольку объекты с `!` будут записываться и запрашиваться быстрее, чем объекты с `String!``id`. Поле `id` служит первичным ключом и должно быть уникальным среди всех объектов одного типа. В силу исторических причин тип `ID!` также принимается и является синонимом `String!`. - -Для некоторых типов объектов `id` создается из идентификаторов двух других объектов; этому способствует `concat`, например, для формирования id `let id = left.id.concat(right.id)` из идентификаторов `left` и `right`. Аналогично этому, чтобы создать идентификатор из идентификатора существующего объекта и счетчика `count`, можно использовать `let id = left.id.concatI32(count)`. Объединение гарантированно приведёт к созданию уникальных идентификаторов, если длина `left` одинакова для всех таких объектов, например, потому что `left.id` является `Address`. - -### Встроенные скалярные типы - -#### Поддерживаемые GraphQL скаляры - -Мы поддерживаем следующие скаляры в нашем GraphQL API: - -| Тип | Описание | -| --- | --- | -| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | -| `String` | Скаляр для значений `string`. 
Нулевые символы не поддерживаются и автоматически удаляются. | -| `Boolean` | Скаляр для значений `boolean`. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | 8-байтовое целое число со знаком, также называемое 64-битным целым числом со знаком, может хранить значения в диапазоне от -9 223 372 036 854 775 808 до 9 223 372 036 854 775 807. Предпочтительно использовать это для представления `i64` из Ethereum. | -| `BigInt` | Большие целые числа. Используются для типов Ethereum `uint32`, `int64`, `uint64`, ..., `uint256`. Примечание: Все, что находится ниже `uint32`, например, `int32`, `uint24` или `int8`, представлено как `i32`. | -| `BigDecimal` | `BigDecimal` Десятичные дроби высокой точности, представленные в виде значащего числа и экспоненты. Диапазон значений экспоненты от -6143 до +6144. Округляется до 34 значащих цифр. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Перечисления - -Вы также можете создавать перечисления внутри схемы. Перечисления имеют следующий синтаксис: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Как только перечисление определено в схеме, Вы можете использовать строковое представление значения перечисления, чтобы задать поле перечисления для объекта. Например, Вы можете установить для `tokenStatus` значение `SecondOwner`, сначала определив свой объект, а затем установив в поле `entity.tokenStatus = "SecondOwner"`. Приведенный ниже пример демонстрирует, как будет выглядеть объект Token с полем enum: - -Более подробную информацию о написании перечислений можно найти в [Документации по GraphQL](https://graphql.org/learn/schema/). - -#### Связи объектов - -Объект может иметь связь с одним или несколькими другими объектами в Вашей схеме. Эти связи могут быть использованы в Ваших запросах. Связи в The Graph являются однонаправленными. 
Можно смоделировать двунаправленные связи, определив однонаправленную связь на любом "конце" связи. - -Связи определяются для объектов точно так же, как и для любого другого поля, за исключением того, что в качестве типа указывается тип другого объекта. - -#### Связи "Один к одному" - -Определите тип объекта `Transaction` с необязательной связью "один к одному" с типом объекта `transactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Связи "Один ко многим" - -Определите тип объекта `TokenBalance` с обязательной связью "один ко многим" с типом объекта Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Обратные запросы - -Обратные зпросы могут быть определены для объекта с помощью поля `@derivedFrom`. При этом в объекте создается виртуальное поле, которое можно запрашивать, но нельзя задать вручную через API мэппингов. Скорее, оно вытекает из отношений, определенных для другого объекта. Для таких отношений редко имеет смысл сохранять обе стороны связи, а производительность как индексирования, так и запросов будет выше, когда сохраняется только одна сторона, а другая является производной. - -Для связей "один ко многим" связь всегда должна храниться на стороне "один", а сторона "многие" всегда должна быть производной. Такое сохранение связи, вместо хранения массива объектов на стороне "многие", приведет к значительному повышению производительности как при индексации, так и при запросах к субграфам. В общем, следует избегать хранения массивов объектов настолько, насколько это возможно. - -#### Пример - -Мы можем сделать балансы для токена доступными из самого токена, создав поле `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! 
- tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Связи "Многие ко многим" - -Для связей "многие ко многим", таких, например, как пользователи, каждый из которых может принадлежать к любому числу организаций, наиболее простым, но, как правило, не самым производительным способом моделирования связей является создание массива в каждом из двух задействованных объектов. Если связь симметрична, то необходимо сохранить только одну сторону связи, а другая сторона может быть выведена. - -#### Пример - -Определите обратный запрос от типа объекта `User` к типу объекта `Organization`. В приведенном ниже примере это достигается путем поиска атрибута `members` внутри объекта `Organization`. В запросах поле `organizations` в `User` будет разрешено путем поиска всех объектов `Organization`, включающих идентификатор пользователя. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -Более эффективный способ сохранить эту взаимосвязь - с помощью таблицы мэппинга, которая содержит по одной записи для каждой пары `User` / `Organization` со схемой, подобной - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -Этот подход требует, чтобы запросы опускались на один дополнительный уровень для получения, например, сведений об организациях для пользователей: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Такой более сложный способ хранения связей "многие ко многим" приведет к уменьшению объема хранимых данных для субграфа и, следовательно, к тому, что субграф будет значительно быстрее индексироваться и запрашиваться. - -#### Добавление комментариев к схеме - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Определение полей полнотекстового поиска - -Полнотекстовые поисковые запросы фильтруют и ранжируют объекты на основе введенных данных текстового запроса. Полнотекстовые запросы способны возвращать совпадения по схожим словам путем обработки текста запроса в виде строк перед сравнением с индексированными текстовыми данными. - -Определение полнотекстового запроса включает в себя название запроса, словарь языка, используемый для обработки текстовых полей, алгоритм ранжирования, используемый для упорядочивания результатов, и поля, включенные в поиск. Каждый полнотекстовый запрос может охватывать несколько полей, но все включенные поля должны относиться к одному типу объекта. - -Чтобы добавить полнотекстовый запрос, включите тип `_Schema_` с полнотекстовой директивой в схему GraphQL. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! 
- discography: [Album!]! - members: [Musician!]! -} -``` - -Поле example `bandSearch` можно использовать в запросах для фильтрации объектов `Band` на основе текстовых документов в `name`, `description` и `bio`.> поля. Перейдите к [GraphQL API - запросы](/querying/graphql-api#queries) для описания API полнотекстового поиска и дополнительных примеров использования. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Управление функциями](#experimental-features):** Начиная с `specVersion` `0.0.4` и далее, `fullTextSearch` должно быть объявлено в разделе `features` в манифесте субграфа. - -### Поддерживаемые языки - -Выбор другого языка окажет решающее, хотя иногда и неуловимое влияние на API полнотекстового поиска. Поля, охватываемые полем полнотекстового запроса, рассматриваются в контексте выбранного языка, поэтому лексемы, полученные в результате анализа и поисковых запросов, варьируются от языка к языку. Например: при использовании поддерживаемого турецкого словаря "token" переводится как "toke", в то время как, конечно, словарь английского языка переводит его в "token". - -Поддерживаемые языковые словари: - -| Код | Словарь | -| ------- | ------------- | -| простой | Общий | -| da | Датский | -| nl | Голландский | -| en | Английский | -| fi | Финский | -| fr | Французский | -| de | Немецкий | -| hu | Венгерский | -| it | Итальянский | -| no | Норвежский | -| pt | Португальский | -| ro | Румынский | -| ru | Русский | -| es | Испанский | -| sv | Шведский | -| tr | Турецкий | - -### Алгоритмы ранжирования - -Поддерживаемые алгоритмы для упорядочивания результатов: - -| Алгоритм | Описание | -| ------------- | ---------------------------------------------------------------------------------------------- | -| rank | Используйте качество соответствия (0-1) полнотекстового запроса, чтобы упорядочить результаты. | -| proximityRank | Аналогично рангу, но также включает в себя близость совпадений. 
| - -## Написание мэппингов - -Мэппинги берут данные из определенного источника и преобразуют их в объекты, которые определены в Вашей схеме. Мэппинги записываются в подмножестве [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html), которое называется [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki), и которое может быть скомпилировано в WASM ([ WebAssembly ](https://webassembly.org/)). AssemblyScript более строг, чем обычный TypeScript, но при этом предоставляет знакомый синтаксис. - -Для каждого обработчика событий, определенного в `subgraph.yaml` в разделе `mapping.EventHandlers`, создайте экспортируемую функцию с тем же именем. Каждый обработчик должен принимать один параметр с именем `event` с типом, соответствующим имени обрабатываемого события. - -В примере субграф `src/mapping.ts` содержит обработчики для событий `NewGravatar` и `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -Первый обработчик принимает событие `NewGravatar` и создает новый объект `Gravatar` с помощью `new Gravatar(event.params.id.toHex())`, заполняя поля объекта, используя соответствующие параметры события. 
Этот экземпляр объекта представлен переменной `gravatar` со значением идентификатора `event.params.id.toHex()`. - -Второй обработчик пытается загрузить существующий `Gravatar` из хранилища узлов The Graph. Если его еще нет, он создается по требованию. Затем объект обновляется в соответствии с новыми параметрами события, прежде чем он будет сохранен обратно в хранилище с помощью `gravatar.save()`. - -### Рекомендуемые идентификаторы для создания новых объектов - -Настоятельно рекомендуется использовать `Bytes` в качестве типа для полей `id` и использовать `String` только для атрибутов, которые действительно содержат удобочитаемый текст, например имя токена. Ниже приведены некоторые рекомендуемые значения `id`, которые следует учитывать при создании новых объектов. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- Для объектов, которые хранят агрегированные данные, например ежедневные объемы торгов, `id` обычно содержит номер дня. В данном случае полезно использовать `Bytes` в качестве `id`. Определение `id` будет выглядеть следующим образом: - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Преобразуйте постоянные адреса в `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -Существует [Библиотека Typescript Graph](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts), которая содержит утилиты для взаимодействия с хранилищем Graph Node и удобства для обработки данных и объектов смарт-контрактов. Её можно импортировать в `mapping.ts` из `@graphprotocol/graph-ts`. - -### Обработка объектов с одинаковыми идентификаторами - -При создании и сохранении нового объекта, если объект с таким же идентификатором уже существует, в процессе слияния приоритетны свойства нового объекта. Это означает, что существующий объект будет обновлен значениями из нового объекта. 
- -Если для поля нового объекта с тем же идентификатором намеренно установлено нулевое значение, существующий объект будет обновлен с использованием нулевого значения. - -Если для поля в новом объекте с тем же идентификатором не установлено значение, поле также будет иметь значение null. - -## Генерация кода - -Для упрощения и обеспечения безопасности типов при работе со смарт-контрактами, событиями и объектами Graph CLI может генерировать типы AssemblyScript на основе схемы GraphQL субграфа и ABI контрактов, включенных в источники данных. - -Это делается с помощью - -```sh -graph codegen [--output-dir ] [] -``` - -но в большинстве случаев субграфы уже предварительно сконфигурированы с помощью `package.json`, что позволяет Вам просто запустить одно из следующих действий для достижения того же результата: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Это сгенерирует класс AssemblyScript для каждого смарт-контракта в файлах ABI, упомянутых в `subgraph.yaml`, позволяя Вам привязывать эти контракты к определенным адресам в мэппигах и вызывать контрактные методы, доступные только для чтения, для обрабатываемого блока. Кроме того, для каждого события контракта генерируется класс, обеспечивающий удобный доступ к параметрам события, а также к блоку и транзакции, от которых произошло событие. Все эти типы записываются в `//.ts`. В примере субграфа это будет код `generated/Gravity/Gravity.ts`, позволяющий импортировать эти типы с помощью мэппинга. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -В дополнение к этому, для каждого типа объекта в схеме GraphQL субграфа генерируется по одному классу. Эти классы обеспечивают безопасную для типов загрузку объектов, доступ к чтению и записи в поля объекта, а также метод `save()` для записи объектов в хранилище. 
Все классы объектов записываются в `/schema.ts`, что позволяет мэппингам импортировать с их помощью - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Примечание:** Генерация кода должна выполняться повторно после каждого изменения схемы GraphQL или ABIS, включенного в манифест. Это также должно быть выполнено по крайней мере один раз перед сборкой или развертыванием субграфа. - -Генерация кода не проверяет Ваш мэппинг код в `src/mapping.ts`. Если Вы хотите проверить это, прежде чем пытаться развернуть свой субграф в Graph Explorer, Вы можете запустить `yarn build` и исправить любые синтаксические ошибки, которые может обнаружить компилятор TypeScript. - -## Шаблоны источников данных - -Распространенным шаблоном в смарт-контрактах, совместимых с EVM, является использование реестровых или заводских контрактов, когда один контракт создает, управляет или ссылается на произвольное количество других контрактов, каждый из которых имеет свое собственное состояние и события. - -Адреса этих субконтрактов могут быть известны или не известны заранее, и многие из этих контрактов могут быть созданы и/или добавлены с течением времени. Поэтому в таких случаях определение одного источника данных или фиксированного количества источников данных невозможно и необходим более динамичный подход: _data source templates_. - -### Источник данных для основного контракта - -Сначала Вы определяете обычный источник данных для основного контракта. Во фрагменте ниже показан упрощенный пример источника данных для контракта фабрики обмена [Uniswap](https://uniswap.org). Обратите внимание на обработчик события `New Exchange(address,address)`. Этот сигнал выдается, когда новый контракт обмена создается в цепочке с помощью заводского контракта. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Шаблоны источников данных для динамически создаваемых контрактов - -Затем Вы добавляете _data source templates_ в манифест. Они идентичны обычным источникам данных, за исключением того, что в них отсутствует предопределенный адрес контракта в `source`. Как правило, Вы определяете один шаблон для каждого типа субконтракта, управляемого родительским контрактом, или на который ссылается родительский контракт. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Создание шаблона источника данных - -На заключительном шаге Вы обновляете мэппинг основного контракта, чтобы создать экземпляр динамического источника данных из одного из шаблонов. 
В данном примере в отображение основного контракта импортируется шаблон `Exchange` и вызывается метод `Exchange.create(address)`, чтобы начать индексирование нового контракта обмена. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Примечание:** Новый источник данных будет обрабатывать только вызовы и события для блока, в котором он был создан, и всех последующих блоков, но не будет обрабатывать исторические данные, т.е. данные, которые содержатся в предыдущих блоках. -> -> Если предыдущие блоки содержат данные, относящиеся к новому источнику данных, лучше всего проиндексировать эти данные, считывая текущее состояние контракта и создавая объекты, представляющие это состояние на момент создания нового источника данных. - -### Контекст источника данных - -Контексты источника данных позволяют передавать дополнительную конфигурацию при создании экземпляра шаблона. В нашем примере предположим, что биржи связаны с определенной торговой парой, которая включена в событие `newExchange`. Эта информация может быть передана в созданный экземпляр источника данных, например, следующим образом: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Внутри мэппинга шаблона `Exchange` затем можно получить доступ к контексту: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Существуют установщики и получатели, такие как `setString` и `getString` для всех типов значений. 
- -## Стартовые блоки - -`startBlock` - это необязательный параметр, который позволяет Вам определить, с какого блока в цепочке источник данных начнет индексацию. Установка начального блока позволяет источнику данных пропускать потенциально миллионы блоков, которые не имеют отношения к делу. Как правило, разработчик субграфа устанавливает `startBlock` в блок, в котором был создан смарт-контракт источника данных. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Примечание:** Блок создания контракта можно быстро найти в Etherscan: -> -> 1. Найдите контракт, введя его адрес в строке поиска. -> 2. Нажмите на хэш транзакции создания в разделе `Contract Creator`. -> 3. Загрузите страницу сведений о транзакции, где Вы найдете начальный блок для этого контракта. - -## Подсказки индексатору - -Параметр `indexerHints` в манифесте субграфа содержит директивы для индексаторов по обработке и управлению субграфом. Это влияет на оперативные решения по обработке данных, стратегиям индексации и оптимизации. В настоящее время в нем предусмотрена опция `prune` для управления сохранением или сокращением исторических данных. - -> Эта функция доступна начиная с `specVersion: 1.0.0` - -### Сокращение - -`indexerHints.prune`: определяет срок хранения исторических данных блока для субграфа. Опции включают в себя: - -1. `"never"`: удаление исторических данных не производится; хранит всю историю. -2. `"auto"`: сохраняет минимально необходимую историю, заданную индексатором, оптимизируя производительность запросов. -3. 
Конкретное число: устанавливает индивидуальный лимит на количество сохраняемых исторических блоков. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Обычно рекомендуется использовать `"auto"`, поскольку оно максимально увеличивает производительность запросов и достаточно для большинства пользователей, которым не требуется доступ к обширным историческим данным. - -Для субграфов, использующих [запросы о путешествиях во времени](/querying/graphql-api/#time-travel-queries), рекомендуется либо установить определенное количество блоков для хранения исторических данных, либо использовать `prune: never`, чтобы сохранить все исторические состояния объектов. 
Ниже приведены примеры того, как настроить оба параметра в настройках вашего субграфа: - -Чтобы сохранить определенный объем исторических данных: - -``` - indexerHints: - prune: 1000 # Замените 1000 на желаемое количество блоков, которые нужно сохранить -``` - -Чтобы сохранить полную историю состояний объекта, выполните следующее: - -``` -indexerHints: - prune: never -``` - -Вы можете проверить самый ранний блок (с историческим состоянием) для данного субграфа, выполнив запрос к [API статуса индексирования](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Обратите внимание на то, что `earliestBlock` — это самый ранний блок с историческими данными, который будет более поздним, чем `startBlock`, указанный в манифесте, если субграф был удален. - -## Обработчики событий - -Обработчики событий в субграфе реагируют на конкретные события, генерируемые смарт-контрактами в блокчейне, и запускают обработчики, определенные в манифесте подграфа. Это позволяет субграфам обрабатывать и хранить данные о событиях в соответствии с определенной логикой. - -### Определение обработчика событий - -Обработчик событий объявлен внутри источника данных в конфигурации YAML субграфа. Он определяет, какие события следует прослушивать, и соответствующую функцию, которую необходимо выполнить при обнаружении этих событий. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Обработчики вызовов - -Хотя события обеспечивают эффективный способ сбора соответствующих изменений в состоянии контракта, многие контракты избегают создания логов для оптимизации затрат на газ. В этих случаях субграф может подписаться на обращения к контракту источника данных. Это достигается путем определения обработчиков вызовов, ссылающихся на сигнатуру функции, и обработчика мэппинга, который будет обрабатывать вызовы этой функции. Чтобы обработать эти вызовы, обработчик мэппинга получит `ethereum.Call` в качестве аргумента, содержащего типизированные входы и выходы вызова. Вызовы, выполненные на любой глубине цепочки вызовов транзакции, запускают мэппинг, позволяя фиксировать действия с контрактом источника данных через прокси-контракты. - -Обработчики вызовов срабатывают только в одном из двух случаев: когда указанная функция вызывается учетной записью, отличной от самого контракта, или когда она помечена как внешняя в Solidity и вызывается как часть другой функции в том же контракте. - -> **Примечание:** Обработчики вызовов в настоящее время зависят от Parity tracing API. Некоторые сети, такие как BNB chain и Arbitrium, не поддерживают этот API. Если субграф, индексирующий одну из этих сетей, содержит один или несколько обработчиков вызовов, синхронизация не начнется. 
Разработчикам субграфов следует вместо этого использовать обработчики событий. Они гораздо более производительны, чем обработчики вызовов, и поддерживаются в каждой сети evm. - -### Определение обработчика вызова - -Чтобы определить обработчика вызовов в Вашем манифесте, просто добавьте массив `callHandlers` под источником данных, на который Вы хотели бы подписаться. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function` - это нормализованная сигнатура функции, по которой можно фильтровать вызовы. Свойство `handler` - это имя функции в Вашем мэппинге, которую Вы хотели бы выполнить при вызове целевой функции в контракте источника данных. - -### Функция мэппинга - -Каждый обработчик вызова принимает один параметр, тип которого соответствует имени вызываемой функции. В приведенном выше примере субграфа мэппинг содержит обработчик для случаев, когда вызывается функция `createGravatar` и получает параметр `CreateGravatarCall` в качестве аргумента: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -Функция `handleCreateGravatar` принимает новый `CreateGravatarCall`, который является подклассом `ethereum.Call`, предоставляемый `@graphprotocol/graph-ts`, который включает в себя введенные входы и выходы о звонке. 
Тип `CreateGravatarCall` генерируется для Вас при запуске `graph codegen`. - -## Обработчики блоков - -В дополнение к подписке на события контракта или вызовы функций, субграф может захотеть обновить свои данные по мере добавления в цепочку новых блоков. Чтобы добиться этого, субграф может запускать функцию после каждого блока или после блоков, соответствующих заранее определенному фильтру. - -### Поддерживаемые фильтры - -#### Фильтр вызовов - -```yaml -filter: - kind: call -``` - -_Определенный обработчик будет вызван один раз для каждого блока, содержащего обращение к контракту (источнику данных), в соответствии с которым определен обработчик._ - -> **Примечание:** Фильтр `call` в настоящее время зависит от Parity tracing API. Некоторые сети, такие как BNB chain и Arbitrium, не поддерживают этот API. Если субграф, индексирующий одну из этих сетей, содержит один или несколько обработчиков блоков с фильтром `call`, синхронизация не начнется. - -Отсутствие фильтра для обработчика блоков гарантирует, что обработчик вызывается для каждого блока. Источник данных может содержать только один обработчик блоков для каждого типа фильтра. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Фильтр опроса - -> **Требуется `specVersion` >= 0.0.8** - -> **Примечание.** Фильтры опроса доступны только для источников данных `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Определенный обработчик будет вызываться один раз для каждого блока `n`, где `n` — это значение, указанное в поле `every`. 
Эта конфигурация позволяет субграфу выполнять определенные операции через регулярные интервалы блоков. - -#### Однократный фильтр - -> **Требуется `specVersion` >= 0.0.8** - -> **Примечание.** Однократные фильтры доступны только для источников данных `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Определенный обработчик с однократным фильтром будет вызываться только один раз перед запуском всех остальных обработчиков. Эта конфигурация позволяет субграфу использовать обработчик в качестве обработчика инициализации, выполняя определенные задачи в начале индексирования. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Функция мэппинга - -Функция мэппинга получит `ethereum.Block` в качестве своего единственного аргумента. Подобно функциям мэппинга событий, эта функция может получать доступ к существующим в хранилище объектам субграфа, вызывать смарт-контракты и создавать или обновлять объекты. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Анонимные события - -Если Вам нужно обрабатывать анонимные события в Solidity, это можно сделать, указав тему события 0, как показано в примере: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Событие будет запущено только в том случае, если подпись и тема 0 совпадают. По умолчанию `topic0` равен хэшу сигнатуры события. - -## Подтверждения транзакций в обработчиках событий - -Начиная с `specVersion` `0.0.5` и `apiVersion` `0.0.7` обработчики событий могут иметь доступ к подтверждению транзакции, которая их отправила. 
- -Для этого обработчики событий должны быть объявлены в манифесте субграфа с новым ключом `receipt: true`, который является необязательным и по умолчанию имеет значение false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Внутри функции обработчика доступ к подтверждению можно получить в поле `Event.receipt`. Если для ключа `receipt` установлено значение`false` или оно опущено в манифесте, вместо него будет возвращено значение `null`. - -## Экспериментальные функции - -Начиная с `specVersion` `0.0.4`, функции субграфа должны быть явно объявлены в разделе `features` на верхнем уровне файла манифеста, используя их имя `camelCase`, как указано в таблице ниже: - -| Функция | Имя | -| -------------------------------------------------------- | ---------------- | -| [Нефатальные ошибки](#non-fatal-errors) | `nonFatalErrors` | -| [Полнотекстовый поиск](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Графтинг](#grafting-onto-existing-subgraphs) | `grafting` | - -Например, если в субграфе используются функции **Full-Text Search** и **Non-fatal Errors**, поле `features` в манифесте должно быть: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Обратите внимание, что использование функции без ее объявления приведет к **ошибке проверки** во время развертывания субграфа, но никаких ошибок не возникнет, если функция объявлена, но не используется. - -### Тайм-серии и агрегации - -Тайм-серии и агрегации позволяют Вашему субграфу отслеживать такие статистические данные, как средняя цена за день, общий объем переводов за час и т. д. - -Эта функция представляет два новых типа объектов субграфов. Объекты тайм-серий записывают точки данных с временными метками. 
Объекты агрегирования выполняют заранее объявленные вычисления над точками данных тайм-серий ежечасно или ежедневно, а затем сохраняют результаты для быстрого доступа через GraphQL. - -#### Пример схемы - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Определение тайм-серий и агрегаций - -Объекты тайм-серий определяются с помощью `@entity(timeseries: true)` в schema.graphql. Каждый объект тайм-серии должен иметь уникальный идентификатор типа int8, метку времени типа Timestamp и включать данные, которые будут использоваться для вычислений объектами агрегации. Эти объекты тайм-серий могут быть сохранены в обычных обработчиках триггеров и выступать в качестве «необработанных данных» для объектов агрегации. - -Объекты агрегации определяются с помощью `@aggregation` в schema.graphql. Каждый объект агрегирования определяет источник, из которого он будет собирать данные (который должен быть объектом тайм-серии), устанавливает интервалы (например, час, день) и указывает функцию агрегирования, которую он будет использовать (например, сумма, количество, минимум, максимум, первый, последний). Объекты агрегации рассчитываются автоматически на основе указанного источника в конце необходимого интервала. - -#### Доступные интервалы агрегации - -- `hour`: устанавливает период тайм-серии каждый час, в час. -- `day`: устанавливает период тайм-серий ежедневный, который начинается и заканчивается в 00:00. - -#### Доступные функции агрегации - -- `sum`: сумма всех значений. -- `count`: количество значений. -- `min`: минимальное значение. -- `max`: максимальное значение. -- `first`: первое значение в периоде. -- `last`: последнее значение за период. 
- -#### Пример запроса агрегации - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Примечание: - -Чтобы использовать тайм-серии и агрегации, субграф должен иметь версию спецификации ≥1.1.0. Обратите внимание, что эта функция может претерпеть значительные изменения, которые могут повлиять на обратную совместимость. - -[Подробнее](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) о тайм-сериях и агрегациях. - -### Нефатальные ошибки - -Ошибки индексирования в уже синхронизированных субграфах по умолчанию приведут к сбою субграфа и прекращению синхронизации. В качестве альтернативы субграфы можно настроить на продолжение синхронизации при наличии ошибок, игнорируя изменения, внесенные обработчиком, который спровоцировал ошибку. Это дает авторам субграфов время на исправление своих субграфов, в то время как запросы к последнему блоку продолжают обрабатываться, хотя результаты могут быть противоречивыми из-за бага, вызвавшего ошибку. Обратите внимание на то, что некоторые ошибки всё равно всегда будут фатальны. Чтобы быть нефатальной, ошибка должна быть детерминированной. - -> **Примечание:** Сеть The Graph пока не поддерживает нефатальные ошибки, и разработчикам не следует разворачивать субграфы, использующие эту функциональность, в сети через Studio. - -Для включения нефатальных ошибок необходимо установить в манифесте субграфа следующий флаг функции: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -В запросе также необходимо разрешить запрос данных с потенциальными несоответствиями с помощью аргумента `subgraphError`. 
Также рекомендуется запросить `_meta`, для проверки того, что субграф пропустил ошибки, как в примере: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Если субграф обнаруживает ошибку, этот запрос вернет как данные, так и ошибку graphql с сообщением `"indexing_error"`, как в данном примере ответа: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Графтинг (перенос) на существующие субграфы - -> **Примечание:** не рекомендуется использовать графтинг при первоначальном переходе на сеть The Graph. Подробнее [здесь](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -Когда субграф развертывается впервые, он начинает индексировать события в блоке genesis соответствующего чейна (или в `startBlock`, определенном для каждого источника данных). В некоторых обстоятельствах полезно повторно использовать данные из существующего субграфа и начинать индексацию с гораздо более позднего блока. Этот режим индексации называется _Grafting_. Графтинг, например, полезен во время разработки, чтобы быстро устранить простые ошибки в отображениях или временно возобновить работу существующего субграфа после его сбоя. - -Субграф графтится (переносится) к базовому субграфу, когда манифест субграфа в `subgraph.yaml` содержит блок `graft` на верхнем уровне: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Когда развертывается субграф, манифест которого содержит блок `graft`, узел The Graph скопирует данные `base` субграфа вплоть до указанного `block` включительно, а затем продолжит индексирование нового субграфа начиная с этого блока. Базовый субграф должен существовать на целевом экземпляре узла The Graph и должен быть проиндексирован по крайней мере до заданного блока. 
Из-за этого ограничения графтинг следует использовать только в процессе разработки или в экстренных случаях, чтобы ускорить создание эквивалентного графтового субграфа. - -Поскольку графтинг копирует, а не индексирует базовые данные, гораздо быстрее перенести субграф в нужный блок, чем индексировать с нуля, хотя для очень больших субграфов копирование исходных данных может занять несколько часов. Пока графтовый субграф инициализируется, узел The Graph будет регистрировать информацию о типах объектов, которые уже были скопированы. - -Графтовый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. Она сама по себе должна быть допустимой схемой субграфа, но может отличаться от схемы базового субграфа следующими способами: - -- Она добавляет или удаляет типы объектов -- Она удаляет атрибуты из типов объектов -- Она добавляет в типы объектов атрибуты с возможностью обнуления -- Она превращает ненулевые атрибуты в нулевые -- Она добавляет значения в перечисления -- Она добавляет или удаляет интерфейсы -- Она изменяется в зависимости от того, для каких типов объектов реализован тот или иной интерфейс - -> **[Управление функционалом](#experimental-features):** `grafting` должен быть объявлен в разделе `features` в манифесте субграфа. - -## IPFS/Arweave File Data Sources - -Источники файловых данных — это новая функциональность субграфа для надежного и расширенного доступа к данным вне чейна во время индексации. Источники данных файлов поддерживают получение файлов из IPFS и Arweave. - -> Это также закладывает основу для детерминированного индексирования данных вне сети, а также потенциального введения произвольных данных из HTTP-источников. - -### Обзор - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. 
These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -Это похоже на [существующие шаблоны источников данных](/developing/creating-a-subgraph/#data-source-templates), которые используются для динамического создания новых источников данных на чейн-основе. - -> Это заменяет существующий API `ipfs.cat` - -### Руководство по обновлению - -#### Обновите `graph-ts` и `graph-cli` - -Для файловых источников данных требуется graph-ts >=0.29.0 и graph-cli >=0.33.1 - -#### Добавьте новый тип объекта, который будет обновляться при обнаружении файлов - -Источники файловых данных не могут получать доступ к объектам на чейн-основе или обновлять их, но должны обновлять объекты, специфичные для файлов. - -Это может означать разделение полей существующих объектов на отдельные объекты, связанные между собой. - -Исходный объединенный объект: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Новый разделенный объект: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Если между родительским объектом и результирующим объектом-источником данных существует связь1:1, то наиболее простым вариантом будет связать родительский объект с результирующим файловым объектом, используя в качестве поиска IPFS CID. Свяжитесь с нами в Discord, если у Вас возникли трудности с моделированием новых объектов на основе файлов! 
- -> Вы можете использовать [вложенные фильтры](/querying/graphql-api/#example-for-nested-entity-filtering) для фильтрации родительских объектов на основе этих вложенных объектов. - -#### Добавьте новый шаблонный источник данных с помощью `kind: file/ipfs` или `kind: file/arweave`. - -Это источник данных, который будет создан при обнаружении интересующего файла. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> В настоящее время требуется `abis`, хотя невозможно вызывать контракты из файловых источников данных - -В файле-источнике данных должны быть конкретно указаны все типы объектов, с которыми он будет взаимодействовать в рамках `entities`. Дополнительные сведения см. в разделе [ограничения](#limitations). - -#### Создание нового обработчика для обработки файлов - -Этот обработчик должен принимать один параметр `Bytes`, который будет содержимым файла, когда он будет найден, который затем можно будет обработать. Часто это файл JSON, который можно обработать с помощью помощников `graph-ts` ([документация](/developing/graph-ts/api/#json-api)). 
- -Доступ к CID файла в виде читаемой строки можно получить через `dataSource` следующим образом: - -```typescript -const cid = dataSource.stringParam() -``` - -Пример обработчика: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Создание файловых источников данных при необходимости - -Теперь вы можете создавать файловые источники данных во время выполнения обработчиков на чейн-основе: - -- Импортируйте шаблон из автоматически созданных `templates` -- вызовите `TemplateName.create(cid: string)` из мэппинга, где cid является действительным идентификатором контента для IPFS или Arweave - -Для IPFS Graph Node поддерживает [идентификаторы контента v0 и v1](https://docs.ipfs.tech/concepts/content-addressing/), а также идентификаторы контента с каталогами (например, `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -Что касается Arweave, то начиная с версии 0.33.0 Graph Node может извлекать файлы, хранящиеся в Arweave, на основе их [ID транзакции](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) из шлюза Arweave ([файл примера](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave поддерживает транзакции, загруженные через Irys (ранее Bundlr), а Graph Node также может получать файлы на основе [манифестов Irys](https://docs.irys.xyz/overview/gateways#indexing). - -Пример: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Этот пример кода предназначен для сборщика субграфа Crypto. Приведенный выше хеш ipfs представляет собой каталог с метаданными токена для всех NFT криптоковена. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //Это создает путь к метаданным для одного сборщика NFT Crypto. Он объединяет каталог с "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Это создаст новый источник данных файла, который будет опрашивать настроенную конечную точку IPFS или Arweave Graph Node, повторяя попытку, если она не найдена. Когда файл будет найден, будет выполнен обработчик источника данных файла. - -В этом примере CID используется для поиска между родительским объектом `Token` и результирующим объектом `TokenMetadata`. - -> Раньше это была точка, в которой разработчик субграфа вызывал `ipfs.cat(CID)` для извлечения файла - -Поздравляем, Вы используете файловые источники данных! - -#### Развертывание субграфов - -Теперь Вы можете `build` (построить) и `deploy` (развернуть) свой субграф на любом узле The Graph >=v0.30.0-rc.0. 
- -#### Ограничения - -Обработчики и объекты файловых источников данных изолированы от других объектов субграфа, что гарантирует их детерминированность при выполнении и исключает загрязнение источников данных на чейн-основе. В частности: - -- Объекты, созданные с помощью файловых источников данных, неизменяемы и не могут быть обновлены -- Обработчики файловых источников данных не могут получить доступ к объектам из других файловых источников данных -- Объекты, связанные с источниками данных файлов, не могут быть доступны обработчикам на чейн-основе - -> Хотя это ограничение не должно вызывать проблем в большинстве случаев, для некоторых оно может вызвать сложности. Если у Вас возникли проблемы с моделированием Ваших файловых данных в субграфе, свяжитесь с нами через Discord! - -Кроме того, невозможно создать источники данных из файлового источника данных, будь то источник данных onchain или другой файловый источник данных. Это ограничение может быть снято в будущем. - -#### Лучшие практики - -Если Вы связываете метаданные NFT с соответствующими токенами, используйте хэш IPFS метаданных для ссылки на объект Metadata из объекта Token. Сохраните объект Metadata, используя хэш IPFS в качестве идентификатора. - -Вы можете использовать [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) при создании файловых источников данных для передачи дополнительной информации, которая будет доступна обработчику файлового источника данных. - -Если у Вас есть объекты, которые обновляются несколько раз, создайте уникальные объекты на основе файлов, используя хэш IPFS и идентификатор объекта, и ссылайтесь на них, используя производное поле в объекте на чейн-основе. 
- -> Мы работаем над улучшением приведенной выше рекомендации, поэтому запросы возвращают только "самую последнюю" версию - -#### Известные проблемы - -Файловые источники данных в настоящее время требуют ABI, даже если ABI не используются ([проблема](https://github.com/graphprotocol/graph-cli/issues/961)). Обходным решением является добавление любого ABI. - -Обработчики для файловых источников данных не могут находиться в файлах, которые импортируют привязки контракта `eth_call`, с ошибкой "unknown import: `ethereum::ethereum.call` has not been defined" ([проблема](https://github.com/graphprotocol/graph-node/issues/4309)). Обходным решением является создание обработчиков файловых источников данных в специальном файле. - -#### Примеры - -[Миграция субграфа Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Ссылки - -[Источники данных GIP-файла](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ru/developing/creating-a-subgraph/_meta.js b/website/pages/ru/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ru/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ru/developing/graph-ts/_meta.js b/website/pages/ru/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ru/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ru/managing/deprecate-a-subgraph.mdx b/website/pages/ru/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ru/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a 
Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ru/mips-faqs.mdx b/website/pages/ru/mips-faqs.mdx deleted file mode 100644 index 84b01a93830a..000000000000 --- a/website/pages/ru/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Введение - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
- -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. - -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? 
- -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? 
- -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? - -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. 
For teams with more than one member, will all the team members be given a MIPs Discord role? - -Да - -### 14. Можно ли использовать заблокированные токены из программы куратора the graph для участия в тестовой сети MIP? - -Да - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Да - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. 
diff --git a/website/pages/ru/querying/_meta.js b/website/pages/ru/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ru/querying/_meta.js +++ b/website/pages/ru/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ru/querying/graph-client/_meta.js b/website/pages/ru/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ru/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sv/_meta.js b/website/pages/sv/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/sv/_meta.js +++ b/website/pages/sv/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index d4ffe991f866..000000000000 --- a/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Distribuera en subgraf till värdtjänsten ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. 
You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Lagra åtkomsttoken - -När du har skapat ett konto navigerar du till din [instrumentpanel](https://thegraph.com/hosted-service/dashboard). Kopiera åtkomsttoken som visas på instrumentpanelen och kör `graph auth --product hosted-service `. Detta kommer att lagra åtkomsttoken på din dator. Du behöver bara göra detta en gång, eller om du någonsin återskapar åtkomsttoken. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Bild** – Välj en bild som ska användas som förhandsgranskningsbild och miniatyrbild för subgrafen. - -**Subgraf namn** - Tillsammans med kontonamnet som subgrafen skapas under kommer detta också att definiera `kontonamn/undergrafnamn`-stilen namn som används för distributioner och GraphQL slutpunkter. _Det här fältet kan inte ändras senare._ - -**Konto** - Kontot som subgrafen skapas under. Detta kan vara kontot för en individ eller organisation. _Subgrafer kan inte flyttas mellan konton senare._ - -**Underrubrik** – Text som kommer att visas på kort för delgrafer. - -**Beskrivning** – Beskrivning av subgrafen, synlig på sidan med subgraf detaljer. - -**GitHub URL** – Länk till subgraf förrådet på GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. 
The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -Du distribuerar subgrafen genom att köra `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -Undergrafens status växlar till `Synced` när Graph Node har extraherat alla data från historiska block. Graph Node fortsätter att inspektera block för din undergraf när dessa block minas. - -## Omdistribuera en subgraf - -När du gör ändringar i din subgrafdefinition, t. ex. för att åtgärda ett problem i entitetsmappningarna, kör du kommandot `yarn deploy` ovan igen för att distribuera den uppdaterade versionen av din subgraf. Varje uppdatering av en subgraf kräver att Graph Node indexerar om hela subgrafen, återigen med början i genesis-blocket. - -Om din tidigare distribuerade subgraf fortfarande har status `Syncing`, kommer den omedelbart att ersättas med den nyligen distribuerade versionen. Om den tidigare distribuerade subgrafen redan är helt synkroniserad, kommer Graph Node att markera den nyligen distribuerade versionen som `Pending version`, synkronisera den i bakgrunden och bara ersätta den för närvarande distribuerade versionen med den nya när den synkroniseras nya versionen är klar. Detta säkerställer att du har en subgraf att arbeta med medan den nya versionen synkroniseras. - -## Distribuera undergrafen till flera nätverk - -I vissa fall vill du distribuera samma undergraf till flera nätverk utan att duplicera all dess kod. 
Den största utmaningen med detta är att kontraktsadresserna på dessa nätverk är olika. - -### Använda graph-cli - -Både `graph build` (sedan `v0.29.0`) och `graph deploy` (eftersom `v0.32.0`) accepterar två nya alternativ: - -```sh -Alternativ: - - ... - --network Nätverkskonfiguration som ska användas från filen nätverk config - --network-file Sökväg till konfigurationsfil för nätverk (standard: "./networks.json") -``` - -Du kan använda alternativet `--network` för att ange en nätverkskonfiguration från en `json` standardfil (standard till `networks.json`) för att enkelt uppdatera din subgraf under utvecklingen. - -**Obs!** Kommandot `init` kommer nu att automatiskt generera en `networks.json` baserat på den tillhandahållna informationen. Du kommer då att kunna uppdatera befintliga eller lägga till ytterligare nätverk. - -Om du inte har en `networks.json`-fil måste du skapa en manuellt med följande struktur: - -```json -{ - "network1": { // nätverkets namn - "dataSource1": { // namn på datakällan - "address": "0xabc...", // Avtalets adress (frivillig uppgift) - "startBlock": 123456 // startBlock (valfritt) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Obs!** Du behöver inte ange någon av `templates` (om du har några) i konfigurationsfilen, bara ` dataSources`. Om det finns några `templates` deklarerade i filen `subgraph.yaml`, kommer deras nätverk automatiskt att uppdateras till det som anges med alternativet `--network`. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -Så här ska nätverkets konfigurationsfil se ut: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Nu kan vi köra något av följande kommandon: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Nu är du redo att `yarn deploy`. - -**Obs:** Som nämnts tidigare, sedan `graph-cli 0.32.0` kan du direkt köra `yarn deploy` med `--nätverk` alternativ: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Använda subgraph.yaml mallen - -En lösning för äldre graph-cli versioner som tillåter parameterisering av aspekter som kontraktsadresser är att generera delar av den med hjälp av ett mallsystem som [Mustache](https://mustache.github.io/) eller [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -och - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Tillsammans med det skulle du ersätta nätverksnamnet och adresserna i manifestet med variabla platshållare `{{network}}` och `{{address}}` och byta namn på manifestet till t.ex. 
`subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -För att generera ett manifest till något av nätverken kan du lägga till ytterligare två kommandon till `package.json` tillsammans med ett beroende av `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -Ett fungerande exempel på detta hittar du [här](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Obs:** Detta tillvägagångssätt kan också tillämpas i mer komplexa situationer, där det är nödvändigt att ersätta mer än kontraktsadresser och nätverksnamn eller där mappningar eller ABI: er genereras från mallar. - -## Kontroll av undergrafens hälsa - -Om en subgraf synkroniseras framgångsrikt är det ett gott tecken på att den kommer att fortsätta att fungera bra för alltid. Nya triggers i nätverket kan dock göra att din subgraf stöter på ett otestat feltillstånd eller så kan den börja halka efter på grund av prestandaproblem eller problem med nodoperatörerna. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. 
The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -Detta kommer att ge dig `chainHeadBlock` som du kan jämföra med `latestBlock` på din subgraf för att kontrollera om den körs efter. `synced` informerar om subgrafen någonsin har kommit ikapp kedjan. `health` kan för närvarande ta värdena `healthy` om inga fel inträffade, eller `failed` om det fanns ett fel som stoppade subgrafens framsteg. I det här fallet kan du kontrollera fältet `fatalError` för detaljer om detta fel. - -## Policy för arkivering av undergrafer för värdtjänster - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Subgraph Studio subgraf arkivpolitik - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Varje subgraf som påverkas av denna policy har en möjlighet att ta tillbaka versionen i fråga. diff --git a/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 7bf304318dad..000000000000 --- a/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Distribuera en undergraf till Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Installera Graph CLI (med antingen yarn eller npm) -- Skapa din subgraf i Subgraph Studio -- Autentisera ditt konto från CLI -- Distribuera en undergraf till Subgraph Studio - -## Installera Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Installera med yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installera med npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Skapa din subgraf i Subgraph Studio - -Innan du distribuerar din faktiska subgraf måste du skapa en subgraf i [Subgraph Studio](https://thegraph.com/studio/). Vi rekommenderar att du läser vår [Studiodokumentation](/deploying/subgraph-studio) för att lära dig mer om detta. 
- -## Initiera din Subgraph - -När din subgraf har skapats i Subgraph Studio kan du initiera subgraf koden med detta kommando: - -```bash -graph init --studio -``` - -Värdet `` finns på sidan med information om din subgraf i Subgraf Studio: - -![Subgraf Studio - Snigel](/img/doc-subgraph-slug.png) - -När du har kört `graph init` kommer du att bli ombedd att ange kontraktsadressen, nätverket och ABI som du vill fråga efter. Om du gör detta genereras en ny mapp på din lokala dator med grundläggande kod för att börja arbeta med din subgraf. Du kan sedan slutföra din subgraf för att se till att den fungerar som förväntat. - -## Auth för grafer - -Innan du kan distribuera din undergraf till Subgraf Studio måste du logga in på ditt konto i CLI. För att göra detta behöver du din deploy key som du hittar på sidan "My Subgraphs" eller på sidan med information om din subgraf. - -Här är kommandot som du behöver använda för att autentisera dig från CLI: - -```bash -graph auth --studio -``` - -## Distribuera en undergraf till Subgraph Studio - -När du är redo kan du distribuera din subgraf till Subgraf Studio. Detta innebär inte att din subgraf publiceras i det decentraliserade nätverket, utan endast att den distribueras till ditt Studio-konto där du kan testa den och uppdatera metadata. - -Här är CLI-kommandot som du behöver använda för att distribuera din subgraf. - -```bash -graph deploy --studio -``` - -Efter att ha kört detta kommando kommer CLI att fråga efter en versionsetikett, du kan namnge den hur du vill, du kan använda etiketter som `0.1` och `0.2` eller använda bokstäver också som `uniswap-v2-0.1`. Dessa etiketter kommer att vara synliga i Graph Explorer och kan användas av curatorer för att bestämma om de vill signalera på den här versionen eller inte, så välj dem med omtanke. 
- -När du har distribuerat kan du testa din subgraf i Subgraf Studio med hjälp av lekplatsen, distribuera en annan version om det behövs, uppdatera metadata och när du är klar kan du publicera din subgraf i Graph Explorer. diff --git a/website/pages/sv/deploying/hosted-service.mdx b/website/pages/sv/deploying/hosted-service.mdx deleted file mode 100644 index fdfd6ef85911..000000000000 --- a/website/pages/sv/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Vad är värdtjänsten? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -I det här avsnittet går vi igenom hur du distribuerar en undergraf till [hostad tjänst](https://thegraph.com/hosted-service/). - -Om du inte har ett konto på den hostade tjänsten kan du registrera dig med ditt GitHub-konto. När du har autentiserat dig kan du börja skapa undergrafer via användargränssnittet och distribuera dem från din terminal. Värdtjänsten stöder ett antal nätverk, till exempel Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum med flera. - -För en fullständig lista, se [Nätverk som stöds](/developing/supported-networks/#hosted-service). - -## Skapa en Subgraf - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### Från ett befintligt avtal - -Om du redan har ett smart kontrakt distribuerat till ditt nätverk kan det vara ett bra sätt att komma igång med värdtjänsten genom att starta upp en ny subgraf från detta kontrakt. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. 
- -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -`` är i det här fallet din GitHub-användare eller organisationsnamn, `` är namnet på din subgraf och ``är det valfria namnet på katalogen där `graph init` kommer att placera exemplets subgrafmanifest. `` är adressen till ditt befintliga kontrakt. `` är namnet på nätverket som kontraktet lever på. `` är en lokal sökväg till en kontrakts ABI-fil. **Både `--nätverk` och `--abi` är valfria.** - -### Från ett exempel på en undergraf - -Det andra läget som `graph init` stöder är att skapa ett nytt projekt från ett exempel på en undergraf. Följande kommando gör detta: - -``` -graph init --from-example --product hosted-service / [] -``` - -Exemplet är baserat på Gravity-kontraktet av Dani Grant som hanterar användaravatarer och sänder ut `NewGravatar` eller `UpdateGravatar`-händelser närhelst avatarer skapas eller uppdateras. Subgrafen hanterar dessa händelser genom att skriva `Gravatar`-entiteter till Graph Node-arkivet och se till att dessa uppdateras i enlighet med händelserna. Fortsätt till [undergrafmanifestet](/developing/creating-a-subgraph#the-subgraph-manifest) för att bättre förstå vilka händelser från dina smarta kontrakt du ska uppmärksamma, mappningar och mer. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. 
- -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Nätverk som stöds av den hostade tjänsten - -Du kan hitta listan över de stödda nätverken [här](/developing/supported-networks). diff --git a/website/pages/sv/deploying/subgraph-studio.mdx b/website/pages/sv/deploying/subgraph-studio.mdx deleted file mode 100644 index bdf69760669e..000000000000 --- a/website/pages/sv/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Välkommen till din nya startplats 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Skapa en undergraf via Studio-gränssnittet -- Distribuera en undergraf med hjälp av CLI -- Publicera en subgraf med Studio-gränssnittet -- Testa den på lekplatsen -- Integrera den i staging med hjälp av fråge-URL: en -- Skapa och hantera API nycklar för specifika undergrafer - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Förfrågningar i undergrafer genererar förfrågningsavgifter som används för att belöna [Indexers](/network/indexing) på Graf-nätverket. Om du är en dapp-utvecklare eller subgrafutvecklare kommer studion att ge dig möjlighet att bygga bättre subgrafer för att driva dina eller din gemenskaps förfrågningar. 
Studion består av fem huvuddelar: - -- Kontroller för ditt användarkonto -- En lista över undergrafer som du har skapat -- En sektion för att hantera, visa detaljer och visualisera status för en specifik undergraf -- Ett avsnitt för att hantera dina API nycklar som du behöver för att ställa frågor till en subgraf -- Ett avsnitt för att hantera din fakturering - -## Så här Skapar du ditt Konto - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. När du har loggat in ser du din unika implementeringsnyckel på startsidan för ditt konto. Detta gör att du antingen kan publicera dina undergrafer eller hantera dina API nycklar + fakturering. Du kommer att ha en unik deployerings nyckel som kan genereras på nytt om du tror att den har äventyrats. - -## Hur man skapar en subgraf i Subgraf Studio - - - -## Kompatibilitet mellan undergrafer och grafnätet - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Indexera ett [stött nätverk](/developing/supported-networks) -- Får inte använda någon av följande egenskaper: - - ipfs.cat & ipfs.map - - Icke dödliga fel - - Ympning - -Fler funktioner & nätverk kommer att läggas till i The Graph Nätverk stegvis. - -### Livscykelflöde för undergraf - -![Livscykel för undergrafer](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. 
When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testa din subgraf i Subgraph Studio - -Om du vill testa din subgraf innan du publicerar den i nätverket kan du göra det i Subgraf **Lekplats** eller titta på dina loggar. Subgrafloggarna talar om för dig **var** din subgraf misslyckas i de fall den gör det. - -## Publicera din subgraf i Subgraf Studio - -Du har klarat dig så här långt - grattis! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Kolla också in videoöversikten nedan: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexerare måste skicka in obligatoriska bevis på indexeringsposter från och med en specifik blockhash. Eftersom publicering av en subgraf är en åtgärd som vidtas på kedjan, kom ihåg att transaktionen kan ta upp till några minuter att gå igenom. Den adress som du använder för att publicera kontraktet kommer att vara den enda som kan publicera framtida versioner. Välj klokt! - -Undergrafer med kureringssignal visas för indexerare så att de kan indexeras i det decentraliserade nätverket. Du kan publicera undergrafer och signal i en transaktion, vilket gör att du kan mynta den första curation-signalen på undergrafen och spara på bensinkostnader. Genom att lägga till din signal till den signal som senare tillhandahålls av Curators, kommer din subgraf också att ha en större chans att i slutändan betjäna frågor. 
- -**Nu när du har publicerat din undergraf, låt oss gå in på hur du hanterar dem regelbundet.** Observera att du inte kan publicera din undergraf till nätverket om den har misslyckats med synkroniseringen. Detta beror vanligtvis på att undergrafen har buggar - loggarna kommer att berätta var dessa problem finns! - -## Versionera din Subgraf med CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Observera att det är förenat med kostnader att publicera en ny version av en undergraf i nätverket. Förutom transaktionsavgifterna måste utvecklarna också finansiera en del av kurationsskatten på den automatiskt migrerande signalen. Du kan inte publicera en ny version av din subgraf om kuratorer inte har signalerat om den. För mer information om riskerna med kuratering, läs mer [här](/network/curating). - -### Automatisk arkivering av versioner av undergrafer - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. 
Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraf Studio - Avarkivera](/img/Unarchive.png) diff --git a/website/pages/sv/developing/creating-a-subgraph.mdx b/website/pages/sv/developing/creating-a-subgraph.mdx deleted file mode 100644 index 78fb1005c030..000000000000 --- a/website/pages/sv/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Skapa en Subgraph ---- - -En subgraph extraherar data från en blockchain, bearbetar den och lagrar den så att den kan frågas enkelt via GraphQL. - -![Definiera en Subgraph](/img/defining-a-subgraph.png) - -Subgraph-definitionen består av några filer: - -- `subgraph.yaml`: en YAML-fil som innehåller subgraph-manifestet - -- `schema.graphql`: ett GraphQL-schema som definierar vilka data som lagras för din subgraph och hur man frågar efter det via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kod som översätter från händelsedata till de enheter som är definierade i ditt schema (t.ex. `mapping.ts` i den här handledningen) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. 
- -## Installera Graph CLI - -Graph CLI är skrivet i JavaScript, och du måste installera antingen `yarn` eller `npm` för att använda det; det antas att du har yarn i det följande. - -När du har `yarn`, installera Graph CLI genom att köra - -**Installera med yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Installera med npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## Från ett Befintligt kontrakt - -Följande kommando skapar en subgraf som indexerar alla händelser i ett befintligt kontrakt. Det försöker hämta kontraktets ABI från Etherscan och faller tillbaka till att begära en lokal filsökväg. Om något av de valfria argumenten saknas tar det dig genom ett interaktivt formulär. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` är ID för din subgraf i Subgraf Studio, det kan hittas på din subgraf detaljsida. - -## Från ett Exempel Subgraph - -Det andra läget som `graph init` stöder är att skapa ett nytt projekt från ett exempel på en undergraf. Följande kommando gör detta: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
The following sections will go over the files that make up the subgraph manifest for this example. - -## Lägg till nya datakällor i en befintlig Subgraf - -Från och med `v0.31.0` stöder `graph-cli` att lägga till nya datakällor i en befintlig subgraf genom kommandot `graph add`. - -```sh -graph add
[] - -Options: - - --abi Sökväg till kontraktets ABI (standard: nedladdning från Etherscan) - --contract-name Kontraktets namn (standard: Kontrakt) - --merge-entities Om enheter med samma namn ska slås samman (standard: false) - --network-file Sökväg till konfigurationsfil för nätverk (standard: "./networks.json") -``` - -Kommandot `add` hämtar ABI: en från Etherscan (om inte en ABI-sökväg anges med alternativet `--abi`) och skapar en ny `dataSource` på samma sätt som kommandot `graph init` skapar en `dataSource` `--from-contract`, och uppdaterar schemat och mappningarna därefter. - -Alternativet `--merge-entities` identifierar hur utvecklaren vill hantera konflikter med `entity`- och `event`-namn: - -- Om `true`: den nya `dataSource` ska använda befintliga `eventHandlers` & `entities`. -- Om `false`: en ny entitet och händelsehanterare ska skapas med `${dataSourceName}{EventName}`. - -Kontraktsadressen kommer att skrivas till `networks.json` för den relevanta nätverket. - -> **Obs:** När du använder det interaktiva kommandoraden, efter att ha kört `graph init` framgångsrikt, kommer du att bli ombedd att lägga till en ny `dataSource`. - -## Subgrafens manifest - -Subgrafens manifest `subgraph.yaml` definierar de smarta kontrakten som din subgraf indexerar, vilka händelser från dessa kontrakt som ska uppmärksammas och hur man kartlägger händelsedata till entiteter som Graph Node lagrar och tillåter att fråga. Den fullständiga specifikationen för subgrafens manifest finns [här](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -För exempelsubgrafen är `subgraph.yaml`: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -De viktiga posterna att uppdatera för manifestet är: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: en lista över alla använda [funktions](#experimentella-funktioner) namn. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: adressen till det smarta kontraktet som subgrafen hämtar data från, och ABI för det smarta kontraktet att använda. Adressen är valfri; att utelämna den gör det möjligt att indexera matchande händelser från alla kontrakt. - -- `dataSources.source.startBlock`: det valfria blocknummer som datakällan börjar indexera från. I de flesta fall föreslår vi att du använder det block där kontraktet skapades. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: de entiteter som datakällan skriver till lagringsplatsen. Schemat för varje entitet definieras i filen schema.graphql. - -- `dataSources.mapping.abis`: en eller flera namngivna ABI-filer för källkontraktet samt eventuella andra smarta kontrakt som du interagerar med från inom mappningarna. - -- `dataSources.mapping.eventHandlers`: listar de smarta kontraktshändelser som denna subgraf reagerar på och hanterare i mappningen—./src/mapping.ts i exemplet - som omvandlar dessa händelser till entiteter i lagringsplatsen. - -- `dataSources.mapping.callHandlers`: listar de smarta kontraktsfunktioner som denna subgraf reagerar på och hanterare i mappningen som omvandlar in- och utdata till funktionsanrop till entiteter i lagringsplatsen. - -- `dataSources.mapping.blockHandlers`: listar de block som denna subgraf reagerar på och hanterare i mappningen som körs när ett block läggs till i kedjan. Utan ett filter körs blockhanteraren varje block. 
En valfri anropsfiltrering kan tillhandahållas genom att lägga till en `filter`-fält med `kind: call` till hanteraren. Detta körs bara om blocket innehåller minst ett anrop till datakällan. - -En enskild subgraf kan indexera data från flera smarta kontrakt. Lägg till en post för varje kontrakt från vilket data behöver indexeras i `dataSources`-matrisen. - -### Order of Triggering Handlers - -Utlösarna för en datakälla inom ett block ordnas med hjälp av följande process: - -1. Händelse- och anropsutlösare ordnas först efter transaktionsindex inom blocket. -2. Händelse- och anropsutlösare inom samma transaktion ordnas med hjälp av en konvention: händelseutlösare först, sedan anropsutlösare, varje typ respekterar ordningen de definieras i manifestet. -3. Blockutlösare körs efter händelse- och anropsutlösare, i den ordning de definieras i manifestet. - -Dessa ordningsregler kan komma att ändras. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Versionsanteckningar | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Hämta ABI: erna - -ABI-filerna måste matcha ditt/dina kontrakt. Det finns några olika sätt att få ABI-filer: - -- Om du bygger ditt eget projekt har du förmodligen tillgång till dina senaste ABIs. -- Om du bygger en subgraf för ett offentligt projekt kan du ladda ner det projektet till din dator och få ABI:n genom att använda [`truffle compile`](https://truffleframework.com/docs/truffle/overview) eller använda solc för att kompilera. -- Du kan också hitta ABI:n på [Etherscan](https://etherscan.io/), men detta är inte alltid pålitligt, eftersom ABI:n som laddas upp där kan vara föråldrad. Se till att du har rätt ABI, annars kommer din subgraf att misslyckas när den körs. - -## GraphQL-schemat - -Schemat för din subgraf finns i filen `schema.graphql`. GraphQL-scheman definieras med hjälp av gränssnittsdefinitionsspråket för GraphQL. Om du aldrig har skrivit ett GraphQL-schema rekommenderas det att du kollar in denna introduktion till GraphQL-typsystemet. Referensdokumentation för GraphQL-scheman finns i avsnittet [GraphQL API](/querying/graphql-api). - -## Definition av entiteter - -Innan du definierar entiteter är det viktigt att ta ett steg tillbaka och tänka på hur din data är strukturerad och länkad. 
Alla frågor kommer att göras mot datamodellen som definieras i subgrafens schema och de entiteter som indexerats av subgraf. Därför är det bra att definiera subgrafens schema på ett sätt som matchar din dapp's behov. Det kan vara användbart att tänka på entiteter som "objekt som innehåller data", snarare än som händelser eller funktioner. - -Med The Graph definierar du helt enkelt entitetstyper i `schema.graphql`, och Graph Node kommer att generera toppnivåfält för att fråga enskilda instanser och samlingar av den entitetstypen. Varje typ som ska vara en entitet måste vara annoterad med en `@entity`-direktiv. Som standard är entiteter muterbara, vilket innebär att mappningar kan ladda befintliga entiteter, ändra dem och lagra en ny version av den entiteten. Mutabilitet har ett pris, och för entitetstyper där det är känt att de aldrig kommer att ändras, till exempel eftersom de helt enkelt innehåller data som extraherats ordagrant från kedjan, rekommenderas att markera dem som omutbara med `@entity(immutable: true)`. Mappningar kan göra ändringar i omutbara entiteter så länge dessa ändringar sker i samma block som entiteten skapades. Omutebara entiteter är mycket snabbare att skriva och att fråga, och bör därför användas när det är möjligt. - -### Bra exempel - -Entiteten `Gravatar` nedan är strukturerad kring ett Gravatar-objekt och är ett bra exempel på hur en entitet kan definieras. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Dåligt exempel - -Exemplen `GravatarAccepted` och `GravatarDeclined` nedan är baserade på händelser. Det rekommenderas inte att mappa händelser eller funktionsanrop till entiteter 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Valfria och obligatoriska fält - -Entitetsfält kan definieras som obligatoriska eller valfria. Obligatoriska fält anges med `!` i schemat. Om ett obligatoriskt fält inte har angetts i mappningen får du det här felmeddelandet när du frågar efter fältet: - -``` -Null value resolved for non-null field 'name' -``` - -Varje entitet måste ha ett `id`-fält, som måste vara av typen `Bytes!` eller `String!`. Det rekommenderas generellt att använda `Bytes!`, om inte `id` innehåller läsbar text, eftersom entiteter med `Bytes!`-id kommer att vara snabbare att skriva och fråga än de med ett `String!` `id`. `id`-fältet fungerar som primärnyckel och måste vara unikt bland alla entiteter av samma typ. Av historiska skäl accepteras också typen `ID!` och är en synonym för `String!`. - -För vissa entitetstyper konstrueras `id` från id:erna hos två andra entiteter; det är möjligt med `concat`, t.ex. `let id = left.id.concat(right.id)` för att bilda id från id:erna hos `left` och `right`. På liknande sätt kan för att konstruera ett id från id:et hos en befintlig entitet och en räknare `count` användas `let id = left.id.concatI32(count)`. Konkatineringen garanterar att producera unika id:er så länge längden av `left` är densamma för alla sådana entiteter, till exempel eftersom `left.id` är en `Address`. - -### Inbyggda Skalartyper - -#### GraphQL-Stödda Skalartyper - -Vi stödjer följande skalartyper i vår GraphQL API: - -| Typ | Beskrivning | -| --- | --- | -| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | -| `String` | Skalär för `string`-värden. Nolltecken stöds inte och tas automatiskt bort. | -| `Boolean` | Skalär för `boolean`-värden. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Stora heltal. Används för Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` typer. Observera: Allt under `uint32`, som `int32`, `uint24` eller `int8` representeras som `i32`. | -| `BigDecimal` | `BigDecimal` Högprecisionsdecimaler representerade som en signifikant och en exponent. Exponentområdet är från −6143 till +6144. Avrundat till 34 signifikanta siffror. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -Du kan också skapa enums inom ett schema. Enums har följande syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -När enumet är definierat i schemat kan du använda enumvärdenas strängrepresentation för att ställa in ett enumfält på en entitet. Till exempel kan du ställa in `tokenStatus` till `SecondOwner` genom att först definiera din entitet och sedan ställa in fältet med `entity.tokenStatus = "SecondOwner"`. Exemplet nedan visar hur Token-entiteten skulle se ut med ett enumfält: - -Mer detaljer om att skriva enums finns i [GraphQL-dokumentationen](https://graphql.org/learn/schema/). - -#### Entitetsrelationer - -En entitet kan ha en relation till en eller flera andra entiteter i ditt schema. Dessa relationer kan traverseras i dina frågor. Relationer i The Graph är enriktade. Det är möjligt att simulera dubbelriktade relationer genom att definiera en enriktad relation på antingen den ena "änden" av relationen. - -Relationer definieras på entiteter precis som vilket annat fält som helst, förutom att den specificerade typen är en annan entitet. 
- -#### En-till-en-relationer - -Definiera en entitetstyp `Transaction` med en valfri en-till-en-relation till en entitetstyp `TransactionReceipt`: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### En-till-många-relationer - -Definiera en entitetstyp `TokenBalance` med ett obligatoriskt en-till-many förhållande med en entitetstyp Token: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Omvända sökningar - -Omvända sökningar kan definieras på en entitet genom fältet `@derivedFrom`. Det skapar ett virtuellt fält på entiteten som kan frågas, men som inte kan ställas in manuellt via mappings API. Istället härleds det från den relation som är definierad på den andra entiteten. För sådana relationer är det sällan meningsfullt att lagra båda sidor av relationen, och både indexering och frågeprestanda blir bättre när bara en sida lagras och den andra härleds. - -För en-till-många-relationer bör relationen alltid lagras på 'en'-sidan, och 'många'-sidan bör alltid härledas. Att lagra relationen på detta sätt, istället för att lagra en array av entiteter på 'många'-sidan, kommer att resultera i dramatiskt bättre prestanda både för indexering och för frågning av subgraphen. Generellt sett bör lagring av arrayer av entiteter undvikas så mycket som är praktiskt möjligt. - -#### Exempel - -Vi kan göra balanserna för en token åtkomliga från token genom att härleda ett fält `tokenBalances`: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Många-till-många-relationer - -För många-till-många-relationer, som till exempel användare som var och en kan tillhöra ett antal organisationer, är det mest raka, men generellt sett inte den mest prestanda-optimerade, sättet att modellera relationen som en array i vardera av de två entiteter som är involverade. Om relationen är symmetrisk behöver bara ena sidan av relationen lagras och den andra sidan kan härledas. - -#### Exempel - -Definiera en omvänd sökning från en entitet av typen `Användare` till en entitet av typen `Organisation`. I exemplet nedan uppnås detta genom att söka upp attributet `medlemmar` inom entiteten `Organisation`. I frågor kommer fältet `organisationer` på `Användare` att lösas genom att hitta alla `Organisations`-entiteter som inkluderar användarens ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -Ett mer effektivt sätt att lagra denna relation är genom en mappningstabell som har en post för varje `User` / `Organization`-par med ett schema som - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -Detta tillvägagångssätt kräver att frågorna går ner till ytterligare en nivå för att hämta t. ex. 
organisationer för användare: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Detta mer avancerade sätt att lagra många-till-många-relationer kommer att leda till att mindre data lagras för subgrafen, och därför till en subgraf som ofta är dramatiskt snabbare att indexera och att fråga. - -#### Lägga till kommentarer i schemat - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Definiera fält för fulltextsökning - -Fulltextsökningar filtrerar och rangordnar entiteter baserat på en textinmatning för sökning. Fulltextförfrågningar kan returnera träffar för liknande ord genom att bearbeta söktexten till stammar innan de jämförs med den indexerade textdata. - -En fulltextförfrågningsdefinition inkluderar förfrågningsnamnet, ordboken som används för att bearbeta textfälten, rangordningsalgoritmen som används för att ordna resultaten och fälten som ingår i sökningen. Varje fulltextförfrågan kan omfatta flera fält, men alla inkluderade fält måste vara från en enda entitetstyp. - -För att lägga till en fulltextförfrågan inkludera en typ `_Schema_` med en fulltextdirektiv i GraphQL-schemat. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -Exempelfältet `bandSearch` kan användas i frågor för att filtrera `Band`-entiteter baserat på textdokumenten i fälten `name`, `description` och `bio`. 
Gå till [GraphQL API - Frågor](/querying/graphql-api#queries) för en beskrivning av API:et för fulltextsökning och fler exempel på användning. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Funktionshantering](#experimental-features):** Från `specVersion` `0.0.4` och framåt måste `fullTextSearch` deklareras under avsnittet `features` i subgraph-manifestet. - -### Stödda språk - -Att välja ett annat språk kommer att ha en definitiv, om än ibland subtil, effekt på fulltext-sök-API:en. Fält som omfattas av en fulltextförfrågningsfunktion granskas i kontexten av det valda språket, så lexem som produceras av analys och sökfrågor varierar från språk till språk. Till exempel: när det används det stödda turkiska ordboken "token" så avstamsas det till "toke", medan engelska ordboken självklart avstammar det till "token". - -Stödda språkordböcker: - -| Kod | Ordbok | -| ----- | ------------ | -| enkel | Allmän | -| da | Danska | -| nl | Holländska | -| en | Engelska | -| fi | Finska | -| fr | Franska | -| de | Tyska | -| hu | Ungerska | -| it | Italienska | -| no | Norska | -| pt | Portugisiska | -| ro | Rumänska | -| ru | Ryska | -| es | Spanska | -| sv | Svenska | -| tr | Turkiska | - -### Rankningsalgoritmer - -Stödda algoritmer för att ordna resultat: - -| Algoritm | Beskrivning | -| ------------- | ---------------------------------------------------------------------------------- | -| rank | Använd matchningskvaliteten (0-1) från fulltextförfrågan för att ordna resultaten. | -| proximityRank | Liknande rank, men inkluderar också närheten av träffarna. | - -## Skriv Mappningar - -Mappningar tar data från en specifik källa och omvandlar den till entiteter som är definierade i din schema. 
Mappningar skrivs i en delmängd av [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) som kallas [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) som kan kompileras till WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript är strängare än vanlig TypeScript, men erbjuder en bekant syntax. - -För varje händelsehanterare som är definierad i `subgraph.yaml` under `mapping.eventHandlers`, skapa en exporterad funktion med samma namn. Varje hanterare måste acceptera en enda parameter med namnet `event` med en typ som motsvarar namnet på händelsen som hanteras. - -I det här exempelsubgraphet innehåller `src/mapping.ts` hanterare för händelserna `NewGravatar` och `UpdatedGravatar`: - -```javascript -import { NewGravatar, UpdatedGravatar } from "../generated/Gravity/Gravity"; -import { Gravatar } from "../generated/schema"; - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id); - gravatar.owner = event.params.owner; - gravatar.displayName = event.params.displayName; - gravatar.imageUrl = event.params.imageUrl; - gravatar.save(); -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id; - let gravatar = Gravatar.load(id); - if (gravatar == null) { - gravatar = new Gravatar(id); - } - gravatar.owner = event.params.owner; - gravatar.displayName = event.params.displayName; - gravatar.imageUrl = event.params.imageUrl; - gravatar.save(); -} -``` - -Den första hanteraren tar en `NewGravatar`-händelse och skapar en ny `Gravatar`-entitet med `new Gravatar(event.params.id.toHex())`, fyller i entitetsfälten med hjälp av motsvarande händelseparametrar. Denna entitetsinstans representeras av variabeln `gravatar`, med ett id-värde av `event.params.id.toHex()`. - -Den andra hanteraren försöker ladda den befintliga `Gravatar` från Graph Node-lagringen. Om den inte finns ännu skapas den på begäran. 
Entiteten uppdateras sedan för att matcha de nya händelseparametrarna innan den sparas tillbaka till lagringen med `gravatar.save()`. - -### Rekommenderade ID:n för att skapa nya entiteter - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Kodgenerering - -För att göra det enkelt och typsäkert att arbeta med smarta kontrakt, händelser och entiteter kan Graph CLI generera AssemblyScript-typer från subgrafens GraphQL-schema och kontrakts-ABIn som ingår i datakällorna. - -Detta görs med - -```sh -graph codegen [--output-dir ] [] -``` - -men i de flesta fall är undergrafer redan förkonfigurerade via `package.json` så att du helt enkelt kan köra en av följande för att uppnå samma sak: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Detta genererar en AssemblyScript-klass för varje smart kontrakt i ABI-filerna som nämns i `subgraph.yaml`, så att du kan binda dessa kontrakt till specifika adresser i mappningarna och anropa skrivskyddade kontraktsmetoder mot det block som bearbetas. Den kommer också att generera en klass för varje kontraktshändelse för att ge enkel åtkomst till händelseparametrar, samt blocket och transaktionen som händelsen härstammar från. Alla dessa typer skrivs till `//.ts`. I undergrafen i exemplet skulle detta vara `generated/Gravity/Gravity.ts`, vilket gör att mappningar kan importera dessa typer med. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Utöver detta genereras en klass för varje entitetstyp i subgrafens GraphQL-schema. Dessa klasser tillhandahåller typsäker entitetsladdning, läs- och skrivåtkomst till entitetsfält samt en `save()`-metod för att skriva entiteter till lagret. Alla entitetsklasser skrivs till `/schema.ts`, vilket gör att mappningar kan importera dem med - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Observera:** Kodgenerering måste utföras igen efter varje ändring av GraphQL-schemat eller ABIn som ingår i manifestet. Det måste också utföras minst en gång innan du bygger eller distribuerar subgrafet. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Datakällmallar - -En vanlig mönster i EVM-kompatibla smarta kontrakt är användningen av register- eller fabrikskontrakt, där ett kontrakt skapar, hanterar eller hänvisar till ett godtyckligt antal andra kontrakt som var och en har sin egen stat och händelser. - -Adresserna till dessa underkontrakt kan eller kanske inte vara kända på förhand, och många av dessa kontrakt kan skapas och/eller läggas till över tid. Det är därför, i sådana fall, som det är omöjligt att definiera en enda datakälla eller ett fast antal datakällor och en mer dynamisk metod behövs: _datakällmallar_. - -### Datakälla för huvudkontraktet - -Först definierar du en vanlig datakälla för huvudkontraktet. Snutten nedan visar ett förenklat exempel på en datakälla för [Uniswap](https://uniswap.org) utbytesfabrikskontrakt. Observera `NewExchange(address,address)` händelsehanteraren. Denna händelse emitteras när en ny utbyteskontrakt skapas på kedjan av fabrikskontraktet. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Datakällmallar för dynamiskt skapade kontrakt - -Sedan lägger du till _datakällmallar_ i manifestet. Dessa är identiska med vanliga datakällor, förutom att de saknar en fördefinierad avtalsadress under `source`. Vanligtvis definierar du en mall för varje typ av underkontrakt som hanteras eller refereras till av det överordnade kontraktet. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instansiering av en mall för datakälla - -I det sista steget uppdaterar du mappningen av huvudkontraktet för att skapa en dynamisk datakällinstans från en av mallarna. I det här exemplet ändrar du mappningen av huvudkontraktet för att importera mallen `Exchange` och anropar metoden `Exchange.create(address)` för att börja indexera det nya växlingskontraktet. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> ** Notera:** En ny datakälla bearbetar endast anrop och händelser för det block där den skapades och alla efterföljande block, men bearbetar inte historiska data, dvs. data som finns i tidigare block. -> -> Om tidigare block innehåller data som är relevanta för den nya datakällan, är det bäst att indexera dessa data genom att läsa kontraktets aktuella status och skapa enheter som representerar denna status vid den tidpunkt då den nya datakällan skapas. 
- -### Kontext för datakälla - -Datakällans kontext gör det möjligt att skicka extra konfiguration när en mall instansieras. I vårt exempel kan vi säga att börser är associerade med ett visst handelspar, vilket ingår i händelsen `NewExchange`. Den informationen kan skickas till den instansierade datakällan, så här: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inuti en mappning av mallen `Exchange` kan kontexten sedan nås: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Det finns sättare och hämtare som `setString` och `getString` för alla värdestyper. - -## Startblock - -`startBlock` är en valfri inställning som låter dig definiera från vilken block i kedjan datakällan ska börja indexera. Genom att ställa in startblocket kan datakällan hoppa över potentiellt miljontals block som är irrelevanta. Vanligtvis kommer en subgrafutvecklare att ställa in `startBlock` till blocket där datakällans smarta kontrakt skapades. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Observera:** Blocket där kontraktet skapades kan snabbt sökas upp på Etherscan: -> -> 1. Sök efter kontraktet genom att ange dess adress i sökfältet. -> 2. 
Klicka på transaktionshashen för skapandet i avsnittet `Kontraktsskapare`. -> 3. Ladda sidan med transaktionsdetaljer där du hittar startblocket för det kontraktet. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
- -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Anropsbehandlare - -Medan händelser ger ett effektivt sätt att samla in relevanta ändringar av ett kontrakts tillstånd, undviker många kontrakt att generera loggar för att optimera gasavgifterna. I dessa fall kan en subgraf prenumerera på anrop som görs till datakällans kontrakt. Detta uppnås genom att definiera anropsbehandlare som refererar till funktions signaturen och hanteraren som kommer att bearbeta anrop till denna funktion. För att bearbeta dessa anrop kommer hanteraren att ta emot ett `ethereum.Call` som ett argument med de typade in- och utdata från anropet. Anrop som görs på vilken djupnivå som helst i en transaktions anropskedja kommer att utlösa kartläggningen, vilket gör det möjligt att fånga aktivitet med datakällan genom proxykontrakt. - -Anropsbehandlare utlöses endast i ett av två fall: när den specificerade funktionen anropas av ett konto som inte är kontraktet självt eller när den är markerad som extern i Solidity och anropas som en del av en annan funktion i samma kontrakt. - -> **Observera:** Anropsbehandlare är för närvarande beroende av Paritys spårnings-API. Vissa nätverk, som BNB-kedjan och Arbitrum, stöder inte denna API. Om en subgraf som indexerar ett av dessa nätverk innehåller en eller flera anropsbehandlare kommer den inte att börja synkroniseras. 
Subgrafutvecklare bör istället använda händelsehanterare. Dessa är mycket mer prestandaoptimerade än anropsbehandlare och stöds på alla evm-nätverk. - -### Definiera en Anropsbehandlare - -För att definiera en anropsbehandlare i din manifest, lägg helt enkelt till en `callHandlers`-array under den datakälla du vill prenumerera på. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function` är den normaliserade funktions signaturen för att filtrera anrop efter. Egenskapen `handler` är namnet på funktionen i din kartläggning som du vill utföra när målfunktionen anropas i datakällans kontrakt. - -### Kartläggningsfunktion - -Varje anropsbehandlare tar en enda parameter med en typ som motsvarar namnet på den kallade funktionen. I det ovanstående exempelsubgrafet innehåller kartläggningen en hanterare för när funktionen `createGravatar` anropas och tar emot en `CreateGravatarCall`-parameter som ett argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -Funktionen `handleCreateGravatar` tar emot ett nytt `CreateGravatarCall`, som är en underklass av `ethereum.Call`, tillhandahållen av `@graphprotocol/graph-ts`, som inkluderar de typade in- och utmatningarna från anropet. 
Typen `CreateGravatarCall` genereras för dig när du kör `graph codegen`. - -## Blockbehandlare - -Förutom att prenumerera på kontrakts händelser eller funktionsanrop kan en subgraf vilja uppdatera sina data när nya block läggs till i kedjan. För att uppnå detta kan en subgraf köra en funktion efter varje block eller efter block som matchar en fördefinierad filter. - -### Stödda filter - -#### Anropsfilter - -```yaml -filter: - kind: call -``` - -_Den definierade hanteraren kommer att anropas en gång för varje block som innehåller ett anrop till det kontrakt (datakälla) som hanteraren är definierad under._ - -> **Observera:** `call`-filtret är för närvarande beroende av Parity-tracing-API: et. Vissa nätverk, som BNB-kedjan och Arbitrum, stöder inte detta API. Om en subgraf som indexerar ett av dessa nätverk innehåller en eller flera blockhanterare med ett `call`-filter, kommer den inte att börja synkronisera. - -Avsaknaden av ett filter för en blockhanterare kommer att säkerställa att hanteraren kallas för varje block. En datakälla kan endast innehålla en blockhanterare för varje filttyp. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Undersökningsfilter - -> **Requires `specVersion` >= 0.0.8** - -> **Observera:** Undersökningsfilter är endast tillgängliga på datakällor av typen `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Den definierade hanteraren kommer att kallas en gång för varje `n` block, där `n` är värdet som anges i fältet `every`. 
Denna konfiguration möjliggör för delgrafer att utföra specifika operationer med regelbundna blockintervall. - -#### En Gång Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Observera:** En gång-filtrar är endast tillgängliga på datakällor av typen `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Den definierade hanteraren med filtret once kommer att anropas endast en gång innan alla andra hanterare körs. Denna konfiguration gör det möjligt för subgrafen att använda hanteraren som en initialiseringshanterare, som utför specifika uppgifter i början av indexeringen. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Kartläggningsfunktion - -Mappningsfunktionen tar emot ett `ethereum.Block` som sitt enda argument. Liksom mappningsfunktioner för händelser kan denna funktion komma åt befintliga subgrafiska enheter i lagret, anropa smarta kontrakt och skapa eller uppdatera enheter. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonyma händelser - -Om du behöver behandla anonyma händelser i Solidity kan du göra det genom att ange händelsens ämne 0, som i exemplet: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -En händelse utlöses endast när både signaturen och topic0 matchar. Som standard är `topic0` lika med hashtillståndet för händelsesignaturen. - -## Transaktionskvitton i Händelsehanterare - -Från och med `specVersion` `0.0.5` och `apiVersion` `0.0.7` kan händelsehanterare få tillgång till kvittot för den transaktion som emitterade dem. 
- -För att göra detta måste händelsehanterare deklareras i delgrafmanifestet med den nya nyckeln `receipt: true`, vilket är valfritt och som standard är falskt. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inuti hanterarfunktionen kan kvittot nås i fältet `Event.receipt`. När nyckeln `receipt` är inställd som `false` eller utelämnad i manifestet, kommer istället ett `null`-värde att returneras. - -## Experimentella funktioner - -Från och med `specVersion` `0.0.4` måste delgrafsfunktioner deklareras explicit i avsnittet `features` högst upp i manifestfilen, med deras `camelCase`-namn, som listas i tabellen nedan: - -| Funktion | Namn | -| ---------------------------------------------------- | ---------------- | -| [Icke dödliga fel](#non-fatal-errors) | `nonFatalErrors` | -| [Fulltextssökning](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Ympning](#grafting-onto-existing-subgraphs) | `grafting` | - -Till exempel, om en delgraf använder funktionerna **Fulltextssökning** och **Icke dödliga fel**, ska fältet `features` i manifestet vara: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Observera att att använda en funktion utan att deklarera den kommer att resultera i ett **valideringsfel** under delgrafens distribution, men inga fel uppstår om en funktion deklareras men inte används. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. 
- -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. 
- -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Icke dödliga fel - -Indexeringsfel på redan synkroniserade delgrafer kommer, som standard, att få delgrafen att misslyckas och sluta synkronisera. Delgrafer kan istället konfigureras för att fortsätta synkroniseringen i närvaro av fel, genom att ignorera ändringarna som orsakades av hanteraren som provocerade felet. Det ger delgrafsförfattare tid att korrigera sina delgrafer medan förfrågningar fortsätter att behandlas mot det senaste blocket, även om resultaten kan vara inkonsekventa på grund av felet som orsakade felet. Observera att vissa fel alltid är dödliga. För att vara icke-dödliga måste felet vara känt för att vara deterministiskt. - -> **Observera:** The Graph Nätverk stöder ännu inte icke-dödliga fel, och utvecklare bör inte distribuera delgrafer med den funktionaliteten till nätverket via Studio. - -Aktivering av icke-dödliga fel kräver att följande funktionsflagga sätts i delgrafens manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -Frågan måste också välja att fråga efter data med potentiella inkonsekvenser genom argumentet `subgraphError`. Det rekommenderas också att fråga `_meta` för att kontrollera om subgrafen har hoppat över fel, som i exemplet: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Om subgrafen stöter på ett fel returnerar frågan både data och ett graphql-fel med meddelandet `"indexing_error"`, som i detta exempelsvar: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Ympning på befintliga delgrafer - -> **Observera:** Det rekommenderas inte att använda ympning vid initial uppgradering till The Graph Nätverk. 
Läs mer [här](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -När en delgraf först distribueras börjar den indexera händelser från genesisblocket på den motsvarande kedjan (eller på `startBlock` som är definierat för varje datakälla). I vissa situationer kan det vara fördelaktigt att återanvända data från en befintlig delgraf och börja indexera vid en mycket senare block. Denna indexeringsläge kallas _Ympning_. Ympning är exempelvis användbart under utvecklingen för att snabbt komma förbi enkla fel i mappningarna eller tillfälligt få en befintlig delgraf att fungera igen efter att den har misslyckats. - -En delgraf ympas på en grunddelgraf när delgrafmanifestet i `subgraph.yaml` innehåller en `graft`-block högst upp: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -När en delgraf vars manifest innehåller en `graft`-sektion distribueras kommer Graph Node att kopiera data från den `base` delgrafen upp till och inklusive det angivna `block` och sedan fortsätta indexera den nya delgrafen från det blocket. Basdelgrafen måste finnas på målnoden Graph Node och måste ha indexerats upp till minst det angivna blocket. På grund av denna begränsning bör ympning endast användas under utveckling eller i en nödsituation för att snabba upp produktionen av en motsvarande icke-ympad delgraf. - -Eftersom ympning kopierar data istället för att indexera basdata går det mycket snabbare att få delgrafen till det önskade blocket än att indexera från början, även om den initiala datorkopieringen fortfarande kan ta flera timmar för mycket stora delgrafer. Medan den ympade delgrafen initialiseras kommer Graph Node att logga information om de entitetstyper som redan har kopierats. - -Den ympade delgrafen kan använda ett GraphQL-schema som inte är identiskt med basdelgrafens, men bara kompatibelt med den. 
Det måste vara ett giltigt delgrafschema i sig själv, men kan avvika från basdelgrafens schema på följande sätt: - -- Den lägger till eller tar bort entitetstyper -- Den tar bort attribut från entitetstyper -- Den lägger till nollställbara attribut till entitetstyper -- Den gör icke-nollställbara attribut till nollställbara attribut -- Den lägger till värden till enum -- Den lägger till eller tar bort gränssnitt -- Den ändrar vilka entitetstyper som ett gränssnitt är implementerat för - -> **[Funktionshantering](#experimental-features):** `grafting` måste deklareras under `features` i delgrafens manifest. - -## IPFS/Arweave File Data Sources - -Filbaserade datakällor är en ny delgrafsfunktion för att få tillgång till data utanför kedjan under indexering på ett robust, utökat sätt. Filbaserade datakällor stödjer hämtning av filer från IPFS och från Arweave. - -> Detta lägger också grunden för deterministisk indexering av data utanför kedjan, samt möjligheten att introducera godtycklig data som hämtas via HTTP. - -### Översikt - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> Detta ersätter den befintliga `ipfs.cat` API - -### Uppgraderingsguide - -#### Uppdatera `graph-ts` och `graph-cli` - -Filbaserade datakällor kräver graph-ts >=0.29.0 och graph-cli >=0.33.1 - -#### Lägg till en ny entitetstyp som kommer att uppdateras när filer hittas - -Filbaserade datakällor kan inte komma åt eller uppdatera kedjebaserade entiteter, utan måste uppdatera filspecifika entiteter. 
- -Detta kan innebära att fält från befintliga entiteter separeras i separata entiteter som är kopplade ihop. - -Ursprunglig kombinerad entitet: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Ny, delad enhet: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Om relationen är 1:1 mellan föräldraentiteten och den resulterande filbaserade datakälla entiteten är det enklaste mönstret att länka föräldraentiteten till en resulterande filbaserad entitet genom att använda IPFS CID som söknyckel. Kontakta oss på Discord om du har svårt att modellera dina nya filbaserade entiteter! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Lägg till en ny mallbaserad datakälla med `kind: file/ipfs` eller `kind: file/arweave` - -Detta är datakällan som skapas när en intressant fil identifieras. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> För närvarande krävs `abis`, även om det inte är möjligt att anropa kontrakt från filbaserade datakällor - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### Skapa en ny hanterare för att bearbeta filer - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -CID för filen som en läsbar sträng kan nås via `dataSource` enligt följande: - -```typescript -const cid = dataSource.stringParam() -``` - -Exempel på hanterare: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Skapa filbaserade datakällor vid behov - -Nu kan du skapa filbaserade datakällor under utförandet av kedjebaserade hanterare: - -- Importera mallen från den automatiskt genererade `templates` -- anropa `TemplateName.create(cid: string)` från en mappning, där cid är en giltig innehållsidentifierare för IPFS eller Arweave - -För IPFS stöder Graph Node [v0 och v1 innehållsidentifierare](https://docs.ipfs.tech/concepts/content-addressing/), och innehållsidentifierare med kataloger (t.ex. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
- -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Exempel: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Denna exempelkod är för en undergraf för kryptosamverkan. Ovanstående ipfs-hash är en katalog med tokenmetadata för alla kryptosamverkande NFT:er. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //Detta skapar en sökväg till metadata för en enskild Crypto coven NFT. Den konkaterar katalogen med "/" + filnamn + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Detta kommer att skapa en ny filbaserad datakälla som kommer att övervaka Graph Nodes konfigurerade IPFS- eller Arweave-slutpunkt och försöka igen om den inte hittas. När filen hittas kommer filbaserad datakälla hanteraren att köras. - -I det här exemplet används CID som koppling mellan förälderentiteten `Token` och den resulterande entiteten `TokenMetadata`. 
- -> Tidigare är detta det punkt där en delgrafutvecklare skulle ha anropat `ipfs.cat(CID)` för att hämta filen - -Grattis, du använder filbaserade datakällor! - -#### Distribuera dina delgrafer - -Du kan nu `bygga` och `distribuera` dina delgrafer till en Graph Node >=v0.30.0-rc.0. - -#### Begränsningar - -Filbaserade datakällahanterare och entiteter är isolerade från andra delgrafentiteter, vilket säkerställer att de är deterministiska när de körs och att ingen förorening av kedjebaserade datakällor sker. För att vara specifik: - -- Entiteter skapade av Filbaserade datakällor är oföränderliga och kan inte uppdateras -- Filbaserade datakällahanterare kan inte komma åt entiteter från andra filbaserade datakällor -- Entiteter associerade med filbaserade datakällor kan inte nås av kedjebaserade hanterare - -> Även om denna begränsning inte bör vara problematisk för de flesta användningsfall kan den införa komplexitet för vissa. Var god kontakta oss via Discord om du har problem med att modellera din data baserad på fil i en delgraf! - -Dessutom är det inte möjligt att skapa datakällor från en filbaserad datakälla, vare sig det är en datakälla på kedjan eller en annan filbaserad datakälla. Denna begränsning kan komma att hävas i framtiden. - -#### Bästa praxis - -Om du länkar NFT-metadata till motsvarande token, använd metadata IPFS-hash för att referera till en Metadata-entitet från Token-entiteten. Spara Metadata-entiteten med IPFS-hash som ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -Om du har entiteter som uppdateras flera gånger, skapa unika filbaserade entiteter med IPFS-hash & entitets-ID, och referera till dem med hjälp av ett härlett fält i kedjebaserade entiteten. 
- -> Vi arbetar med att förbättra rekommendationen ovan så att förfrågningar endast returnerar den "senaste" versionen - -#### Kända problem - -Filbaserade datakällor kräver för närvarande ABIs, även om ABIs inte används ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Ett arbetsområde är att lägga till en ABI. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Exempel - -[Crypto Coven Migration av undergrafer](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Referenser - -[GIP Filbaserade datakällor](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/sv/developing/creating-a-subgraph/_meta.js b/website/pages/sv/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/sv/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sv/developing/graph-ts/_meta.js b/website/pages/sv/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/sv/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sv/managing/deprecate-a-subgraph.mdx b/website/pages/sv/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/sv/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it 
on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/sv/mips-faqs.mdx b/website/pages/sv/mips-faqs.mdx deleted file mode 100644 index 2f53debe4124..000000000000 --- a/website/pages/sv/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Vanliga Frågor om MIPs ---- - -## Introduktion - -> Observera: MIPs-programmet är avslutat sedan maj 2023. Tack till alla Indexers som deltog! - -Det är en spännande tid att delta i The Graph-ekosystemet! Under [Graph Day 2022](https://thegraph.com/graph-day/2022/) tillkännagav Yaniv Tal [avslutningen av den hostade tjänsten](https://thegraph.com/blog/sunsetting-hosted-service/), ett ögonblick som The Graph-ekosystemet har arbetat mot i många år. - -För att stödja avslutningen av den hostade tjänsten och migrationen av all dess aktivitet till det decentraliserade nätverket har The Graph Foundation tillkännagivit [Migration Infrastructure Providers (MIPs) programmet](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
- -MIPs-programmet är ett incitamentsprogram för Indexers för att stödja dem med resurser att indexera kedjor bortom Ethereum-huvudnätet och hjälpa The Graph-protokollet att expandera det decentraliserade nätverket till en flerlagers infrastruktur. - -MIPs-programmet har allokerat 0,75% av GRT-försörjningen (75M GRT), med 0,5% för att belöna Indexers som bidrar till att starta nätverket och 0,25% som tilldelats Network Grants för subgraph-utvecklare som använder flerlags-subgraphs. - -### Användbara Resurser - -- [Indexer 2ools från Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [Så blir du en effektiv Indexer på The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Tilldelningsoptimerare](https://github.com/graphprotocol/allocationopt.jl) -- [Verktyg för Tilldelningsoptimering](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Är det möjligt att generera ett giltigt bevis för indexering (POI) även om en subgraph har misslyckats? - -Ja, det är faktiskt möjligt. - -För sammanhang specificerar skiljedomstolsstadgan [läs mer om stadgan här](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) metoden för att generera ett POI för en misslyckad subgraph. - -En medlem av communityn, [SunTzu](https://github.com/suntzu93), har skapat ett skript för att automatisera denna process i enlighet med stadgans metodik. Kolla in repositoriet [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Vilken kedja kommer MIPs-programmet att incitamenta först? - -Den första kedjan som kommer att stödjas på det decentraliserade nätverket är Gnosis Chain! Tidigare känd som xDAI är Gnosis Chain en EVM-baserad kedja. Gnosis Chain valdes som den första med tanke på användarvänlighet för att köra noder, Indexer-readiness, överensstämmelse med The Graph och användning inom web3. - -### 3. 
Hur kommer nya kedjor att läggas till i MIPs-programmet? - -Nya kedjor kommer att tillkännages under MIPs-programmet, baserat på Indexer-readiness, efterfrågan och communityns inställning. Kedjor kommer först att stödjas på testnätet och sedan kommer en GIP att antas för att stödja den kedjan på huvudnätet. Indexers som deltar i MIPs-programmet kommer att välja vilka kedjor de är intresserade av att stödja och kommer att tjäna belöningar per kedja, utöver att tjäna frågeavgifter och indexbelöningar på nätverket för att betjäna subgraphs. MIPs-deltagare kommer att poängsättas baserat på sin prestation, förmåga att betjäna nätverksbehoven och communitystöd. - -### 4. Hur vet vi när nätverket är redo för en ny kedja? - -The Graph Foundation kommer att övervaka QoS-prestandamätningar, nätverksprestanda och communitykanaler för att bäst bedöma beredskapen. Prioriteten är att säkerställa att nätverket uppfyller prestandakraven för de multi-chain dapps att kunna migrera sina subgraphs. - -### 5. Hur fördelas belöningar per kedja? - -Eftersom kedjor varierar i sina krav för synkronisering av noder och de skiljer sig åt i fråga om frågevolym och användning, kommer belöningar per kedja att beslutas i slutet av den kedjans cykel för att säkerställa att all feedback och lärdomar fångas upp. Dock kommer Indexers när som helst också att kunna tjäna frågeavgifter och indexbelöningar när kedjan stöds på nätverket. - -### 6. Behöver vi indexera alla kedjor i MIPs-programmet eller kan vi välja bara en kedja och indexera den? - -Du är välkommen att indexera vilken kedja du vill! Målet med MIPs-programmet är att rusta Indexers med verktyg och kunskap att indexera de kedjor de önskar och stödja de web3-ekosystem de är intresserade av. Men för varje kedja finns det faser från testnätet till huvudnätet. Se [MIPs-notionssidan](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) för att lära dig mer om faserna. - -### 7. 
När kommer belöningarna att distribueras? - -MIPs-belöningar kommer att distribueras per kedja när prestandamätningarna uppfylls och migrerade subgraphs stöds av de Indexers. Håll utkik efter information om totala belöningar per kedja halvvägs genom den kedjans cykel. - -### 8. Hur fungerar poängsättning? - -Indexers kommer att tävla om belöningar baserat på poängsättning under hela programmet på ledartavlan. Programmets poängsättning kommer att baseras på: - -**Täckning av Subgraph** - -- Ger du maximalt stöd för subgraphs per kedja? - -- Under MIPs förväntas stora Indexers satsa 50%+ av subgraphs per kedja de stöder. - -**Tjänstekvalitet** - -- Tjänar Indexern kedjan med god tjänstekvalitet (latens, färsk data, drifttid, osv.)? - -- Stöder Indexern dapp-utvecklare genom att vara lyhörd för deras behov? - -Allokerar Indexer effektivt och bidrar till nätverkets övergripande hälsa? - -**Stöd till gemenskapen** - -- Samarbetar indexeraren med andra indexerare för att hjälpa dem att förbereda sig för multikedjor? - -- Ger Indexer feedback till kärnutvecklare genom hela programmet eller delar information med Indexerare i forumet? - -### 9. Hur kommer Discord-rollen att tilldelas? - -Moderatorer kommer att tilldela rollerna under de närmaste dagarna. - -### 10. Är det okej att starta programmet på ett testnät och sedan byta till Mainnet? Kommer du att kunna identifiera min nod och ta hänsyn till den när du delar ut belöningar? - -Ja, det förväntas faktiskt av dig att göra det. Flera faser är på Görli och en är på mainnet. - -### 11. När förväntar du dig att deltagarna lägger till en distribution av ett mainnet? - -Det kommer att finnas ett krav på att ha en mainnet indexerare under fas 3. Mer information om detta kommer att [delas på denna begreppssida inom kort] (https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Kommer belöningar att bli föremål för intjänande? 
- -Den procentandel som ska distribueras i slutet av programmet kommer att vara föremål för intjänande. Mer information om detta kommer att ges i indexeringsavtalet. - -### 13. För lag med mer än en medlem, kommer alla lagmedlemmar att få en MIPs Discord-roll? - -Ja - -### 14. Är det möjligt att använda de låsta tokens från grafkuratorprogrammet för att delta i MIPs testnät? - -Ja - -### 15. Kommer det att finnas en period för att bestrida ogiltiga POI under MIPs-programmet? - -Som skall beslutas. Vänligen återvänd till den här sidan med jämna mellanrum för mer information om detta eller om din förfrågan är brådskande, vänligen maila info@thegraph.foundation - -### 17. Kan vi kombinera två intjänandeavtal? - -Nej. Alternativen är: du kan delegera en till den andra eller köra två separata indexerare. - -### 18. KYC Frågor? - -Vänligen skicka e-post till info@thegraph.foundation - -### 19. Jag är inte redo att indexera Gnosis-kedjan, kan jag hoppa in och börja indexera från en annan kedja när jag är redo? - -Ja - -### 20. Finns det rekommenderade regioner för att köra servrarna? - -Vi ger inga rekommendationer om regioner. När du väljer platser kanske du vill tänka på var de stora marknaderna finns för kryptovalutor. - -### 21. Vad är "gasolkostnad för hanterare"? - -Det är det deterministiska måttet på kostnaden för att utföra en handler. I motsats till vad namnet kanske antyder är det inte relaterat till gaskostnaden på blockkedjor. 
diff --git a/website/pages/sv/querying/_meta.js b/website/pages/sv/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/sv/querying/_meta.js +++ b/website/pages/sv/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/sv/querying/graph-client/_meta.js b/website/pages/sv/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/sv/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/_meta.js b/website/pages/sw/_meta.js new file mode 100644 index 000000000000..f2f3b56163a5 --- /dev/null +++ b/website/pages/sw/_meta.js @@ -0,0 +1,5 @@ +import meta from '../en/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/arbitrum/_meta.js b/website/pages/sw/arbitrum/_meta.js new file mode 100644 index 000000000000..321fe93849be --- /dev/null +++ b/website/pages/sw/arbitrum/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/arbitrum/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/cookbook/_meta.js b/website/pages/sw/cookbook/_meta.js new file mode 100644 index 000000000000..7fc5602ab4d2 --- /dev/null +++ b/website/pages/sw/cookbook/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/cookbook/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/deploying/_meta.js b/website/pages/sw/deploying/_meta.js new file mode 100644 index 000000000000..3d7abedc4d57 --- /dev/null +++ b/website/pages/sw/deploying/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/deploying/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/developing/_meta.js b/website/pages/sw/developing/_meta.js new file mode 100644 index 000000000000..48d6b89bb3fe 
--- /dev/null +++ b/website/pages/sw/developing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/developing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/developing/creating-a-subgraph/_meta.js b/website/pages/sw/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/sw/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/developing/graph-ts/_meta.js b/website/pages/sw/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/sw/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/managing/_meta.js b/website/pages/sw/managing/_meta.js new file mode 100644 index 000000000000..a7c7b3d79464 --- /dev/null +++ b/website/pages/sw/managing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/managing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/network/_meta.js b/website/pages/sw/network/_meta.js new file mode 100644 index 000000000000..49858537c885 --- /dev/null +++ b/website/pages/sw/network/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/network/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/publishing/_meta.js b/website/pages/sw/publishing/_meta.js new file mode 100644 index 000000000000..eb06f56f912a --- /dev/null +++ b/website/pages/sw/publishing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/publishing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/querying/_meta.js b/website/pages/sw/querying/_meta.js new file mode 100644 index 000000000000..e52da8f399fb --- /dev/null +++ b/website/pages/sw/querying/_meta.js @@ -0,0 +1,5 @@ +import meta from 
'../../en/querying/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/querying/graph-client/_meta.js b/website/pages/sw/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/sw/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/release-notes/_meta.js b/website/pages/sw/release-notes/_meta.js new file mode 100644 index 000000000000..1df59d0049cf --- /dev/null +++ b/website/pages/sw/release-notes/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/release-notes/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/sps/_meta.js b/website/pages/sw/sps/_meta.js new file mode 100644 index 000000000000..4ebd7d55a84f --- /dev/null +++ b/website/pages/sw/sps/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/sps/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/sw/translations.ts b/website/pages/sw/translations.ts new file mode 100644 index 000000000000..340f7eeea339 --- /dev/null +++ b/website/pages/sw/translations.ts @@ -0,0 +1,13 @@ +import supportedNetworks from './developing/supported-networks.json' +import docsearch from './docsearch.json' +import global from './global.json' +import index from './index.json' + +const translations = { + global, + index, + docsearch, + supportedNetworks, +} + +export default translations diff --git a/website/pages/tr/_meta.js b/website/pages/tr/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/tr/_meta.js +++ b/website/pages/tr/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 0f6093a2c7da..000000000000 --- 
a/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Barındırılan Hizmet Üzerinde Subgraph Oluşturun ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. 
_This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. 
- -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... 
-} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. - -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. 
To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/tr/deploying/hosted-service.mdx b/website/pages/tr/deploying/hosted-service.mdx deleted file mode 100644 index 7e56d003f6f1..000000000000 --- a/website/pages/tr/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -Bu bölüm, [barındırılan hizmet](https://thegraph.com/hosted-service/)'e bir subgraph dağıtma konusunda size yol gösterecektir. - -Barındırılan hizmette bir hesabınız yoksa, GitHub hesabınızla kaydolabilirsiniz. 
Kimliğinizi doğruladıktan sonra, kullanıcı arayüzü aracılığıyla subgraphlar oluşturmaya ve bunları terminalinizden dağıtmaya başlayabilirsiniz. Barındırılan hizmet Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum ve daha fazlası gibi bir dizi ağı desteklemektedir. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Subgraph Oluştur - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -Tercih ettiğiniz ağa halihazırda dağıtılmış bir akıllı sözleşmeniz varsa, bu sözleşmeden yeni bir subgraph'ı önyüklemek, barındırılan hizmete başlamak için iyi bir yol olabilir. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. 
The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### Bir Proxy Sözleşmesinden - -Bir Proxy sözleşmesini izlemek üzere uyarlanmış bir subgraph oluşturmak için, uygulama sözleşmesinin adresini belirterek subgraph'ı başlatın. Başlatma işlemi tamamlandıktan sonra, son adım subgraph.yaml dosyasındaki ağ adının Proxy sözleşmesinin adresine güncellenmesini kapsar. Aşağıdaki komutu kullanabilirsiniz. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Barındırılan Hizmette Desteklenen Ağlar - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/tr/deploying/subgraph-studio.mdx b/website/pages/tr/deploying/subgraph-studio.mdx deleted file mode 100644 index 4ba104e6a0a6..000000000000 --- a/website/pages/tr/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## Subgraph Stüdyo'da Subgraph Nasıl Oluşturulur - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Graftlama - -More features & networks will be added to The Graph Network incrementally. 
- -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. 
Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/tr/developing/creating-a-subgraph.mdx b/website/pages/tr/developing/creating-a-subgraph.mdx deleted file mode 100644 index 2918dcfaff72..000000000000 --- a/website/pages/tr/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Subgraph Oluşturma ---- - -Subgraph, verileri bir blok zincirinden çıkarır, işler ve GraphQL aracılığıyla kolayca sorgulanabilmesi için depolar. 
- -![Subgraph Tanımlama](/img/defining-a-subgraph.png) - -Subgraph tanımı birkaç dosyadan oluşmaktadır: - -- `subgraph.yaml`: Subgraph manifest'ini içeren bir YAML dosyası - -- `schema.graphql`: Subgraph içinde depolanan verileri ve GraphQL üzerinden nasıl sorgulayacağınızı tanımlayan bir GraphQL şeması - -- `AssemblyScript Mappings`: Olay verilerinden şemanızda tanımlanan varlıklara çeviri yapan [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kodu (örneğin bu öğretici içerikte `mapping.ts`) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Graph CLI'ı Yükleyin - -Graph CLI, JavaScriptle yazılmıştır ve kullanmak için `yarn` veya `npm` kurmanız gerekir; aşağıdaki içerik yarn yüklediğinizi varsaymaktadır. - -`Yarn`'a sahip olduğunuzda, Graph CLI'yi çalıştırarak yükleyin - -**Yarn ile kurulum:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Npm ile kurulum:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## Mevcut Bir Sözleşmeden - -Aşağıdaki komut, mevcut bir sözleşmenin tüm olaylarını indeksleyen bir subgraph oluşturur. Sözleşme ABI'sini Etherscan'dan almaya çalışır ve yerel bir dosya yolu istemeye geri döner. İsteğe bağlı argümanlardan herhangi biri eksikse, sizi etkileşimli bir formdan geçirir. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -``, Subgraph Studio'daki subgraph kimliğidir ve subgraph ayrıntıları sayfanızda bulunabilir. - -## Örnek Bir Subgraph'dan - -`Graph init`'in desteklediği ikinci mod, örnek bir subgraph'dan yeni bir proje oluşturmayı destekler. Aşağıdaki komut bunu yapar: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Mevcut Bir Subgraph'a Yeni veriKaynakları(dataSources) Ekleme - -`v0.31.0` 'dan itibaren, `graph-cli`, var olan bir subgraph'a `graph add` komutu aracılığıyla yeni veriKaynakları(dataSources) eklemeyi destekler. - -```sh -graph add
[] - -Seçenekler: - - --abi Sözleşme ABI'sinin yolu (varsayılan: Etherscan'dan indir) - --contract-name Sözleşme adı (varsayılan: Contract) - --merge-entities Aynı ada sahip varlıkların birleştirilip birleştirilmeyeceği (varsayılan: false) - --network-file Ağ yapılandırma dosyası yolu (varsayılan: "./networks.json") -``` - -`add` komutu, ABI'yi Etherscan'den getirecektir (`--abi` seçeneğiyle bir ABI yolu belirtilmedikçe) ve tıpkı `graph init` komutunun şemayı güncelleyerek ve eşleştirerek bir `dataSource` `--from-contract` oluşturması gibi yeni bir `dataSource` oluşturacaktır. - -`--merge-entities` seçeneği, geliştiricinin `entity` ve `event` ad çakışmalarını nasıl ele alacağını belirler: - -- `true` ise: yeni `dataSource` mevcut `eventHandlers` & `entities`'i kullanmalıdır. -- `false` ise: `${dataSourceName}{EventName}` ile yeni bir entity(varlık) & event handler(olay işleyicisi) oluşturulmalıdır. - -Sözleşme `adresi`, ilgili ağ için `networks.json`'a yazılacaktır. - -> **Not:** Etkileşimli cli kullanırken, `graph init` başarıyla çalıştırdıktan sonra yeni bir `dataSource` eklemeniz istenecektir. - -## Subgraph Manifestosu - -Subgraph manifest'i `subgraph.yaml`, subgraph'ınız tarafından indekslenen akıllı sözleşmeleri, bu sözleşmelerdeki hangi olaylara dikkat edileceğini ve olay verilerinin Graph Node'un depoladığı ve sorgulamasına izin verdiği varlıklarla nasıl eşleneceğini tanımlar. Subgraph manifestlerinin tüm özelliklerini [burada](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) bulabilirsiniz. 
- -Örnek subgraph için `subgraph.yaml` şöyledir: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Manifest için güncellenmesi gereken önemli girdiler şunlardır: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: kullanılan tüm [özellik(feature)](#experimental-features) adlarının bir listesi. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: Subgraph kaynaklarının olduğu akıllı sözleşmenin adresi ve kullanılacak akıllı sözleşmenin ABI'si. Adres isteğe bağlıdır; atlanması, tüm sözleşmelerden eşleşen olayları indekslemeyi sağlar. - -- `dataSources.source.startBlock`: veri kaynağının indekslemeye başladığı isteğe bağlı blok numarası. Çoğu durumda, sözleşmenin oluşturulduğu bloğun kullanılmasını öneririz. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: subgraph eşleştirmelerinde kullanılabilen anahtar-değer çiftleridir. `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` ve `BigInt` gibi çeşitli veri tipleri desteklenir. Her değişkenin `type` ve `data` özelliklerinin belirtilmesi gerekir. Bu bağlam değişkenlerine daha sonra eşleştirme dosyalarından erişilebilir ve böylece subgraph geliştirme için daha yapılandırılabilir seçenekler sunulmuş olur. - -- `dataSources.mapping.entities`: veri kaynağının depoya yazdığı varlıklar. Her varlık için şema, schema.graphql dosyasında tanımlanır. - -- `dataSources.mapping.abis`: kaynak sözleşmesinin yanı sıra eşleştirmelerde içeriden etkileşimde bulunduğunuz diğer akıllı sözleşmeler için bir veya daha fazla isimlendirilmiş ABI dosyası. - -- `dataSources.mapping.eventHandlers`: bu subgraph'ın tepki verdiği akıllı sözleşme olaylarını ve bu olayları depodaki varlıklara dönüştüren eşleştirme içindeki işleyicileri —./src/mapping.ts örnekte— listeler. - -- `dataSources.mapping.callHandlers`: bu subgraph'ın tepki verdiği akıllı sözleşme fonksiyonlarını ve fonksiyon çağrılarına giriş ve çıkışları depodaki varlıklara dönüştüren eşleştirmedeki işleyicileri listeler. - -- `dataSources.mapping.blockHandlers`: Bu subgraph'ın tepki verdiği blokları ve zincire bir blok eklendiğinde çalışacak eşleştirmedeki işleyicileri listeler. Filtre olmadan, blok işleyici her blokta çalıştırılacaktır. 
Opsiyonel bir çağrı filtresi, işleyiciye bir `filter` alan ile `kind: call` eklenerek sağlanabilir. Bu, blok veri kaynağı sözleşmesine en az bir çağrı içeriyorsa yalnızca işleyiciyi çalıştırır.
-
-Bir subgraph birden fazla akıllı sözleşmeden veri indeksleyebilir. `dataSources` dizisine indekslenmesi gereken veriden her kontrata bir giriş ekleyin.
-
-### Order of Triggering Handlers
-
-Bir bloktaki veri kaynağı için tetikleyiciler şu işlemlerle sıralanır:
-
-1. Olay ve çağrı tetikleyicileri, öncelikle bloktaki işlem indeksine göre sıralanır.
-2. Aynı işlemdeki olay ve çağrı tetikleyicileri, bir kurala göre sıralanır: önce olay tetikleyicileri, ardından çağrı tetikleyicileri olmak üzere her tür manifest'te tanımlandıkları sıraya göre sıralanır.
-3. Blok tetikleyicileri, olay ve çağrı tetikleyicilerinden sonra manifest'te tanımlandıkları sıraya göre çalıştırılır.
-
-Bu sıralama kuralları değişebilir.
-
-> **Note:** When new [dynamic data sources](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered.
-
-### Indexed Argument Filters / Topic Filters
-
-> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`
-
-Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments.
-
-- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data.
-
-- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Sürüm | Sürüm Notları | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### ABI'leri Alma - -ABI dosya(lar)ı sözleşme(ler) inizle uygun olmalıdır. ABI dosyalarını edinmek için birkaç yol vardır: - -- Kendi projenizi oluşturuyorsanız, muhtemelen en güncel ABI'lerinize erişiminiz olacaktır. -- Herkese açık bir proje için bir subgraph oluşturuyorsanız, projeyi bilgisayarınıza indirerek [`truffle compile`](https://truffleframework.com/docs/truffle/overview) veya derlemek için solc kullanarak ABI'yi alabilirsiniz. -- ABI'yi ayrıca [Etherscan](https://etherscan.io/)'de de bulabilirsiniz, ancak bu her zaman güvenilir değildir çünkü yüklenen ABI güncelliğini yitirmiş olabilir. Doğru ABI'ye sahip olduğunuzdan emin olun, aksi takdirde subgraph çalıştırma başarısız olacaktır. - -## GraphQL Şeması - -Subgraph'ınızın şeması `schema.graphql` dosyasındadır. GraphQL şemaları, GraphQL arayüzü tanımlama dili kullanılarak tanımlanır. Daha önce bir GraphQL şeması yazmadıysanız, GraphQL tipi sisteme yönelik bu içeriği kontrol etmeniz önerilir. GraphQL şema referans belgeleri [GraphQL API](/querying/graphql-api) bölümünde bulunabilir. - -## Varlıkları Tanımlama - -Varlıkları tanımlamadan önce, bir adım geri atıp verilerinizin nasıl yapılandırıldığını ve bağlantılı olduğunu düşünmek önemlidir. 
Tüm sorgular, subgraph şemasında tanımlandığı şekilde veri modeline ve subgraph tarafından indekslenen varlıklara karşı yapılacaktır. Bu nedenle, subgraph şemasını dapp'iniz için ihtiyaçlarınıza uygun şekilde tanımlamanız iyi bir yaklaşım olacaktır. Varlıkları olaylar veya fonksiyonlar yerine "veri içeren nesneler" olarak farzetmek faydalı olabilir. - -Graph ile `schema.graphql`'de basitçe varlık türlerini tanımlarsınız ve Graph Düğümü bu varlık türünün tek bir örneğini ve koleksiyonunu sorgulamak için üst düzey alanlar oluşturur. Bir varlık olarak kabul edilmesi gereken her tür, `@entity` yönergesi ile işaretlenmelidir. Varsayılan olarak varlıklar değişkendir, yani eşlemeler mevcut varlıkları yükleyebilir, değiştirebilir ve o varlığın yeni bir sürümünü depolayabilir. Değişebilirlik bir bedelle gelir ve örneğin zincirden kelimesi kelimesine çıkarılan verileri içerdiklerinden dolayı asla değiştirilmeyecekleri bilinen varlık türleri için, bunları `@entity(immutable: true)` ile değişmez olarak işaretlenmesi önerilir. Eşleştirmeler, değişiklikler varlığın oluşturulduğu aynı blokta gerçekleştiği sürece değişmez varlıklarda değişiklik yapabilir. Değişmez varlıklar çok daha hızlı yazılıp sorgulanabilir, bu nedenle mümkün olduğunca kullanılmalıdır. - -### İyi Bir Örnek - -Aşağıdaki `Gravatar` varlığı, bir Gravatar nesnesi etrafında yapılandırılmıştır ve bir varlığın nasıl tanımlanabileceğine iyi bir örnektir. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Kötü Bir Örnek - -Aşağıdaki `GravatarAccepted` ve `GravatarDeclined` örnek varlıkları olayları temel alır. Olayların veya fonksiyon çağrılarının varlıklara birebir eşlenmesi önerilmez. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Opsiyonel ve Zorunlu Alanlar - -Varlık alanları zorunlu veya opsiyonel olarak tanımlanabilir. Zorunlu alanlar şemada `!` ile belirtilir. Eğer zorunlu bir alan eşleştirme işlemi sırasında ayarlanmazsa, alanı sorgularken şu hatayı alırsınız: - -``` -Null value resolved for non-null field 'name' -``` - -Her varlık türü bir `id` alanına sahip olmalıdır ve bu alan `Bytes!` veya `String!` türünde olmalıdır. Genellikle, `id` insan tarafından okunabilir metin içeriyorsa `Bytes!` kullanılması önerilir çünkü `Bytes!` türündeki id'leri olan varlıklar, `String!` `id`'leri olanlardan daha hızlı yazılıp sorgulanabilir. `id` alanı birincil anahtar olarak hizmet eder ve aynı türdeki tüm varlıklar arasında benzersiz olması gerekir. Tarihi nedenlerden dolayı, `ID!` türü de kabul edilir ve `String!` ile eşanlamlıdır. - -Bazı varlık türleri için `id`, iki diğer varlığın id'lerinden oluşturulur; bunu `concat` kullanarak yapmak mümkündür, örneğin `let id = left.id.concat(right.id)`, `sol(left)` ve `sağ(right)` id'lerinden id'yi oluşturmak için kullanılır. Benzer şekilde, var olan bir varlığın id'si ve sayıcı `sayısı(count)`kullanarak bir id oluşturmak için `let id = left.id.concatI32(count)` kullanılabilir. Birleştirme işleminin, `sol(left)`'in bu tür tüm varlıklar için aynı uzunlukta olduğu sürece benzersiz id'ler üretmesi garanti edilir, örneğin `left.id`'nin bir `Address` olması. - -### Gömülü Skaler(Scalar) Türler - -#### GraphQL'in Desteklediği Skalerler - -GraphQL API'mizde aşağıdaki skalerleri destekliyoruz: - -| Tür | Tanım | -| --- | --- | -| `Baytlar` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | -| `Dizgi(String)` | `string` değerleri için skaler. Null karakterleri desteklenmez ve otomatik olarak kaldırılır. | -| `Boolean` | `boolean` değerleri için skaler. 
| -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Büyük tamsayılar. Ethereum'un `uint32`, `int64`, `uint64`, ..., `uint256` türleri için kullanılır. Not: `int32`, `uint24` veya `int8` gibi `uint32`'nin altındaki her şey `i32`olarak temsil edilir. | -| `BigDecimal` | `BigDecimal` Yüksek hassasiyetli ondalık sayılar, bir anlamlı ve bir üsle temsil edilir. Üs aralığı -6143 ila +6144 arasındadır. 34 anlamlı rakama yuvarlanır. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Numaralandırmalar - -Ayrıca bir şema içinde numaralandırmalar da oluşturabilirsiniz. Numaralandırmalar aşağıdaki sözdizimine sahiptir: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Numaralandırma şemada tanımlandıktan sonra, bir varlık üzerinde numaralandırma alanını ayarlamak için numaralandırma değerinin dizgi gösterimini kullanabilirsiniz Örneğin, önce varlığınızı tanımlayarak ve ardından alanı `entity.tokenStatus = "SecondOwner"` ile ayarlayarak `tokenStatus`'u `SecondOwner` olarak ayarlayabilirsiniz. Aşağıdaki örnek, Token varlığının bir numaralandırma alanıyla nasıl görüneceğini göstermektedir: - -Numaralandırma yazmakla alakalı daha fazla ayrıntıyı [GraphQL belgelerinde](https://graphql.org/learn/schema/) bulabilirsiniz. - -#### Varlık İlişkileri - -Bir varlık, şemanızdaki bir veya daha fazla başka varlıkla ilişkili olabilir. Bu ilişkiler, sorgularınızda çaprazlanabilir. Graph'taki ilişkiler tek yönlüdür. İki yönlü ilişkileri simüle etmek, ilişkinin herhangi biri "son" üzerinde tek yönlü bir ilişki tanımlayarak mümkündür. 
-
-İlişkiler, belirtilen türün başka bir varlığın türü olması dışında, diğer tüm alanlarda olduğu gibi varlıklar üzerinde tanımlanır.
-
-#### Bire Bir İlişkiler
-
-Bir `TransactionReceipt` varlık türüyle isteğe bağlı bire bir ilişkiye sahip bir `İşlem(Transaction)` varlık türü tanımlayın:
-
-```graphql
-type Transaction @entity(immutable: true) {
-  id: Bytes!
-  transactionReceipt: TransactionReceipt
-}
-
-type TransactionReceipt @entity(immutable: true) {
-  id: Bytes!
-  transaction: Transaction
-}
-```
-
-#### Birden Çoğa İlişkiler
-
-Bir Token varlık türü ile zorunlu birden çoğa ilişkisi olan bir `TokenBalance` varlık türü tanımlayın:
-
-```graphql
-type Token @entity(immutable: true) {
-  id: Bytes!
-}
-
-type TokenBalance @entity {
-  id: Bytes!
-  amount: Int!
-  token: Token!
-}
-```
-
-#### Tersine Aramalar
-
-Ters aramalar, `@derivedFrom` alanı aracılığıyla bir varlıkta tanımlanabilir. Bu, varlık üzerinde sorgulanabilecek ancak eşleştirme API'si aracılığıyla manuel olarak ayarlanamayacak bir sanal alan oluşturur. Aksine, diğer varlık üzerinde tanımlanan ilişkiden türetilir. Bu ilişkiler için, genellikle ilişkinin her iki tarafını da depolamak anlamsızdır ve hem indeksleme hem de sorgu performansı, sadece bir tarafta depolanması ve diğerinde türetilmesi durumunda daha iyi olacaktır.
-
-Birden çoğa ilişkileri için, ilişki her zaman 'birden' tarafında depolanmalı ve her zaman 'çoğa' tarafında türetilmelidir. İlişkinin 'çoğa' tarafında bir dizi varlık depolamak yerine bu şekilde saklanması, subgraph indeksleme ve sorgulaması adına önemli ölçüde daha iyi performans sağlayacaktır. Genel olarak, varlık dizilerini depolamaktan mümkün olduğunca sakınılması gerekmektedir.
-
-#### Örnek
-
-Bir token bakiyelerini token'den erişilebilir hale getirebiliriz. Bunun için bir `tokenBalances` alanı türetmemiz gerekir:
-
-```graphql
-type Token @entity(immutable: true) {
-  id: Bytes!
-  tokenBalances: [TokenBalance!]! 
@derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Çoktan Çoğa İlişkiler - -Kullanıcıların her birinin birden çok kuruluşa mensup olabileceği gibi çoktan çoğa ilişkilerde, ilişkiyi modellemenin en basit fakat pek verimli olmayan yolu, ilişkide yer alan iki varlıkta da bir dizi olarak saklamaktır. İlişki simetrik ise, ilişkinin yalnızca bir tarafının saklanması gerekir ve diğer taraf türetilebilir. - -#### Örnek - -`User` varlık türünden `Organization` varlık türüne bir tersine arama tanımlayın. Aşağıdaki örnekte bu, `Organization` varlığı içindeki `members` özniteliğini arayarak elde edilir. Sorgularda, `User` üzerindeki `organizations` alanı, kullanıcının kimliğini(id) içeren tüm `Organization` varlıklarını bulmak suretiyle çözümlenir. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -Bu ilişkiyi daha performanslı bir şekilde depolamanın yolu, bunu her `User`/`Organization` çifti için bir girişe sahip bir eşleştirme tablosu aracılığıyla yapmaktır. Şema olarak şu şekilde olabilir - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-}
-```
-
-Bu yaklaşım, örneğin kullanıcılar için kuruluşları almak için sorguların ek bir seviyeye inmesini gerektirir:
-
-```graphql
-query usersWithOrganizations {
-  users {
-    organizations {
-      # this is a UserOrganization entity
-      organization {
-        name
-      }
-    }
-  }
-}
-```
-
-Çoktan çoğa ilişkileri depolamanın daha ayrıntılı bu yolu, subgraph için depolanan veri miktarının azalmasına ve bu sonucunda genellikle indekslenmesi ve sorgulanması önemli ölçüde daha hızlı olan bir subgraph sağlayacaktır.
-
-#### Şemaya notlar/yorumlar ekleme
-
-As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below:
-
-```graphql
-type MyFirstEntity @entity {
-  # unique identifier and primary key of the entity
-  id: Bytes!
-  address: Bytes!
-}
-```
-
-## Tam Metinde Arama Alanlarını Tanımlama
-
-Tam metinde arama sorguları, metin arama girdisine dayanarak varlıkları filtreler ve sıralar. Tam metin sorguları, sorgu metni girişini indekslenmiş metin verileriyle karşılaştırmadan önce köklere işleyerek benzer kelimeler için eşleşmeler döndürebilir.
-
-Tam metin sorgusu tanımı, sorgu adı, metin alanlarını işlemek için kullanılan dil sözlüğü, sonuçları sıralamak için kullanılan sıralama algoritması ve aramaya dahil edilen alanları içerir. Her tam metin sorgusu birden fazla alana yayılabilir, ancak dahil edilen tüm alanlar tek bir varlık türünden olmalıdır.
-
-Tam metin sorgusu eklemek için, GraphQL şemasına tam metin yönergesi içeren bir `_Schema_` türü ekleyin.
-
-```graphql
-type _Schema_
-  @fulltext(
-    name: "bandSearch"
-    language: en
-    algorithm: rank
-    include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }]
-  )
-
-type Band @entity {
-  id: Bytes!
-  name: String!
-  description: String!
-  bio: String
-  wallet: Address
-  labels: [Label!]!
-  discography: [Album!]!
-  members: [Musician!]! 
-} -``` - -`bandSearch` örnek alanı, `name`,`description` ve `bio` alanlarındaki metin belgelerine göre `Band` varlıklarını filtrelemek için sorgularda kullanılabilir. Tam metin arama API'si ve daha fazla örnek kullanımı için [GraphQL API - Sorgulama](/querying/graphql-api#queries)'ya geçin. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Özellik Yönetimi](#experimental-features):** `specVersion` `0.0.4` ve sonrasında, `fullTextSearch`, subgraph bildiriminde `features` bölümü altında belirtilmelidir. - -### Desteklenen diller - -Farklı bir dil seçmek, tam metin arama API'sı üzerinde bazen az olsa da kesin bir etkiye sahip olacaktır. Tam metin sorgu alanı tarafından kapsanan alanlar, seçilen dile bağlı olarak incelenir, bu nedenle analiz ve arama sorguları tarafından üretilen sözlükbirimleri dilden dile değişir. Örneğin: desteklenen Türkçe sözlük kullanıldığında "token" kelimesi "toke" olarak kök alınırken, elbette İngilizce sözlük "token" olarak kök alacaktır. - -Desteklenen dil sözlükleri: - -| Kod | Sözlük | -| ----- | ---------- | -| yalın | Genel | -| da | Danca | -| nl | Flemenkçe | -| en | İngilizce | -| fi | Fince | -| fr | Fransızca | -| de | Almanca | -| hu | Macarca | -| it | İtalyanca | -| no | Norveççe | -| pt | Portekizce | -| ro | Romence | -| ru | Rusça | -| es | İspanyolca | -| sv | İsveççe | -| tr | Türkçe | - -### Algoritmaları Sıralama - -Sonuçları sıralamak için desteklenen algoritmalar: - -| Algoritma | Tanım | -| ------------- | ----------------------------------------------------------------------------------- | -| rank | Sonuçları sıralamak için tam metin sorgusunun eşleştirme kalitesini (0-1) kullanın. | -| proximityRank | Rank'a benzer ancak eşleşmelerin benzerliğini de içerir. | - -## Eşleştirmeleri Yazma - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax.
-
-`subgraph.yaml`'da `mapping.eventHandlers` altında tanımlanan her olay işleyicisi için, aynı isimde dışa aktarılmış bir fonksiyon oluşturun. Her işleyici, işlenen olayın adına karşılık gelen bir türde `event` adında tek bir parametre kabul etmelidir.
-
-Örnek subgraph'ta, `src/mapping.ts` dosyası `NewGravatar` ve `UpdatedGravatar` olayları için işleyiciler içerir:
-
-```javascript
-import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity'
-import { Gravatar } from '../generated/schema'
-
-export function handleNewGravatar(event: NewGravatar): void {
-  let gravatar = new Gravatar(event.params.id)
-  gravatar.owner = event.params.owner
-  gravatar.displayName = event.params.displayName
-  gravatar.imageUrl = event.params.imageUrl
-  gravatar.save()
-}
-
-export function handleUpdatedGravatar(event: UpdatedGravatar): void {
-  let id = event.params.id
-  let gravatar = Gravatar.load(id)
-  if (gravatar == null) {
-    gravatar = new Gravatar(id)
-  }
-  gravatar.owner = event.params.owner
-  gravatar.displayName = event.params.displayName
-  gravatar.imageUrl = event.params.imageUrl
-  gravatar.save()
-}
-```
-
-İlk işleyici, `NewGravatar` olayını alır ve karşılık gelen olay parametrelerini kullanarak varlık alanlarını dolduran `new Gravatar(event.params.id)` ile yeni bir `Gravatar` varlığı oluşturur. Bu varlık örneği, `event.params.id` kimlik değeri olan `gravatar` değişkeni tarafından temsil edilir.
-
-İkinci işleyici, mevcut `Gravatar`'ı Graph Düğümü deposundan yüklemeye çalışır. Henüz mevcut değilse, talep üzerine oluşturulur. 
Varlık daha sonra `gravatar.save()` kullanılarak mağazaya geri kaydedilmeden önce yeni olay parametreleriyle eşleşecek şekilde güncellenir. - -### Yeni Varlıklar Oluşturmak için Önerilen Kimlikler(IDs) - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Kod Oluşturma - -Akıllı sözleşmeler, olaylar ve varlıklarla çalışmayı kolay ve tip güvenli hale getirmek amacıyla Graph CLI, subgraph'ın GraphQL şemasından ve veri kaynaklarında bulunan sözleşme ABI'lerinden AssemblyScript türleri oluşturabilir. - -Bununla yapılır - -```sh -graph codegen [--output-dir ] [] -``` - -ancak çoğu durumda, subgraphlar zaten `package.json` aracılığıyla önceden yapılandırılmıştır, bu nedenle aşağıdakilerden birini çalıştırarak aynı sonucu elde etmek mümkündür: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -Bu, `subgraph.yaml`'da belirtilen ABI dosyalarındaki her akıllı sözleşme için bir AssemblyScript sınıfı oluşturacak ve bu sözleşmeleri eşleştirmelerle belirli adreslere bağlamanıza ve işlenen bloğa karşı salt okunur sözleşme yöntemlerini çağırmanıza olanak tanıyacaktır. Ayrıca, her sözleşme olayı için bir sınıf oluşturacak ve olay parametrelerine kolay erişim sağlayacak, ayrıca olayın kaynaklandığı blok ve işlemi sağlayacaktır. Tüm bu tipler `//.ts` dosyasına yazılmaktadır. Örnek subgraph'ta, bu `generated/Gravity/Gravity.ts` olur, böylece eşleştirmelerin bu tipleri iç aktarmasına izin verilir. - -```javascript -import { - // Kontrat sınıfı: - Gravity, - // Olayların sınıfları: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -Bunun yanı sıra, subgraph'taki GraphQL şemasında bulunan her varlık türü için bir sınıf oluşturulur. Bu sınıflar, tip güvenli varlık yükleme, varlık alanlarına okuma ve yazma erişimi sağlar ve ayrıca bir `save()` yöntemi ile varlıkları depoya yazarlar. Tüm varlık sınıfları `/schema.ts`'ye yazılır, böylece eşleştirmeler şu şekilde bunları içe aktarabilir - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Not:** Kod oluşturma, GraphQL şemasındaki veya manifeste dahil edilen ABI'lerdeki her değişiklikten sonra tekrar yapılmalıdır. Ayrıca, subgraph oluşturulmadan önce en az bir kez yapılmalıdır. 
- -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Veri Kaynağı Şablonları - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Ana Sözleşme için Veri Kaynağı - -İlk olarak, ana sözleşme için düzenli bir veri kaynağı tanımlarsınız. Aşağıdaki örnek, [Uniswap](https://uniswap.org) borsa(exchange) factory sözleşmesi için basitleştirilmiş bir veri kaynağı göstermektedir. `NewExchange(address,address)` olay işleyicisine dikkat edin. Bu, factory sözleşmesi tarafından zincir üstünde yeni bir takas sözleşmesi oluşturulduğunda yayınlanır. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Dinamik Olarak Oluşturulan Sözleşmeler için Veri Kaynağı Şablonları - -Ardından, manifest'e _veri kaynağı şablonları_ eklersiniz. Bunlar, `source` altında önceden tanımlanmış bir sözleşme adresi olmayan düzenli veri kaynaklarıyla aynıdır. 
Genellikle, ana sözleşme tarafından yönetilen veya başvurulan her alt-sözleşme türü için bir şablon tanımlarsınız. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Bir Veri Kaynağı Şablonunun Örneklenmesi - -Son adımda, ana sözleşme eşleştirmesini güncelleyerek bir şablondan dinamik bir veri kaynağı örneği oluşturursunuz. Bu örnekte, ana sözleşme eşlemesini değiştirerek `Exchange` şablonunu içe aktarır ve yeni takas sözleşmesini indekslemek için `Exchange.create(address)` yöntemini çağırırsınız. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Borsayı indekslemeye başlayın; "event.params.exchange" - // yeni borsa sözleşmesinin adresi - Exchange.create(event.params.exchange) -} -``` - -> **Not:** Yeni bir veri kaynağı, oluşturulduğu blok ve tüm takip eden bloklar için yalnızca çağrıları ve olayları işleyecektir, ancak önceki bloklarda bulunan geçmiş verileri işlemeyecektir. -> -> Eğer önceki bloklar, yeni veri kaynağı için ilgili veri içeriyorsa, o veriyi indekslemek için sözleşmenin mevcut durumunu okuyarak ve yeni veri kaynağı oluşturulurken o zaman dilimindeki durumu temsil eden varlıklar oluşturarak yapmak en iyisidir. 
- -### Veri Kaynağı Bağlamı - -Veri kaynağı bağlamları, bir şablonu anında özelleştirmek için ek yapılandırma geçişine izin verir. Örneğimizde, borsalar belirli bir alım-satım çifti ile ilişkilendirilir ve bu bilgi `NewExchange` olayına dahil edilir. Bu bilgi, oluşturulan veri kaynağına şöyle aktarılabilir: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -`Exchange` şablonunun eşleştirmesi içinde, bağlama şu şekilde erişilebilir: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -Tüm değer tipleri için `setString` ve `getString` gibi ayarlayıcılar ve alıcılar vardır. - -## Başlangıç Blokları - -`startBlock`, veri kaynağının indekslemeye başlayacağı zincirdeki hangi bloktan başlayacağını belirlemenize olanak tanıyan isteğe bağlı bir ayarlamadır. Başlangıç bloğunu belirlemek, veri kaynağının ilgisiz olabilecek potansiyel milyonlarca bloğu atlamasına olanak tanır. Tipik olarak, bir subgraph geliştiricisi, veri kaynağı akıllı sözleşmesinin oluşturulduğu bloğa `startBlock` ayarlar. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Not:** Sözleşme oluşturma bloğu hızlı bir şekilde Etherscan'da aranabilir: -> -> 1. 
Arama çubuğuna adresini girerek sözleşmeyi arayın. -> 2. `Contract Creator` bölümünde oluşturma işlemi hash'ına tıklayın. -> 3. İşlem detayları sayfasını yükleyin ve bu sözleşme için başlangıç bloğunu bulacaksınız. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
- -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Çağrı İşleyicileri - -Etkinliğin bir sözleşmenin durumunda ilgili değişiklikleri toplamak için etkili bir yol sağlamasıyla birlikte, birçok sözleşme gaz maliyetlerini optimize etmek için günlük oluşturmaktan kaçınır. Bu durumlarda, bir subgraph veri kaynağı sözleşmesine yapılan çağrılara abone olabilir. Bunun için, işlev imzasına ve bu işlevi işleyecek eşleme işleyicisine başvurularak çağrı işleyicileri tanımlanır. Bu çağrıları işlemek için eşleme işleyicisi, `ethereum.Call` olarak adlandırılan ve çağrıya ilişkin yazılım girdileri ve çıktıları olan bir argüman alır. İşlem'in çağrı zincirinin herhangi bir derinliğinde yapılan çağrılar eşleştirmeyi tetikleyecektir, bu sayede veri kaynağı sözleşmesi aracılığıyla proxy sözleşmeleri aracılığıyla gerçekleştirilen faaliyetler yakalanabilir. - -Çağrı işleyicileri yalnızca iki durumdan birinde tetiklenir: belirtilen işlevin sözleşme tarafından değil, başka bir hesap tarafından çağrılması durumunda veya Solidity'de harici olarak işaretlenip aynı sözleşmenin başka bir işlevinin bir parçası olarak çağrılması durumunda yalnızca tetiklenir. - -> **Not:** Çağrı işleyicileri şu anda Parity izleme API'sine bağlıdır. BNB zinciri ve Arbitrum gibi bazı ağlar bu API'yı desteklemez. 
Bu ağlardan birini indeksleyen bir subgraph, bir veya daha fazla çağrı işleyicisi içeriyorsa senkronizasyon başlatılmaz. Subgraph geliştiricileri bunun yerine etkinlik işleyicilerini kullanmalıdır. Bunlar çağrı işleyicilerinden çok daha performanslıdır ve her EVM ağı tarafından desteklenir. - -### Bir Çağrı İşleyici Tanımlama - -Manifestinizde bir çağrı işleyicisi tanımlamak için sadece abone olmak istediğiniz veri kaynağı altında bir `callHandlers` dizisi ekleyin. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function`, çağrıları filtrelemek için normalleştirilmiş işlev imzasıdır. `handler` özelliği, veri kaynağı sözleşmesinde hedef işlev çağrıldığında yürütmek istediğiniz işlevin adıdır. - -### Eşleştirme fonksiyonu - -Her çağrı işleyicisi, çağrılan işlevin adına karşılık gelen bir tipe sahip tek bir parametre alır. 
Yukarıdaki örnek subgraphta eşleme, `createGravatar` işlevi çağrıldığında ve bir `CreateGravatarCall` parametresi olarak alındığında işleyici içerir: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -`handleCreateGravatar` fonksiyonu, `@graphprotocol/graph-ts` tarafından sağlanan `ethereum.Call` alt sınıfı olan ve çağrının yazılmış girdileri ve çıktıları içeren yeni bir `CreateGravatarCall`'u alır. `CreateGravatarCall` türü, `graph codegen` çalıştırıldığında sizin için oluşturulur. - -## Blok İşleyicileri - -Bir subgraph, sözleşme olaylarına veya işlev çağrılarına abone olmanın yanı sıra, zincire yeni bloklar eklendikçe verilerini güncellemek isteyebilir. Bu işlemi gerçekleştirmek için a subgraph, her blok sonrasında veya önceden tanımlanmış bir filtreye uygun bloklardan sonra bir işlev çalıştırabilir. - -### Desteklenen Filtreler - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_Tanımlanan işleyici, işleyicinin altında tanımlandığı sözleşmeye (veri kaynağı) çağrı içeren her blok için bir kez çağrılacaktır._ - -> **Not:** `call` filtresi şu anda Parity izleme API'sine bağlıdır. BNB zinciri ve Arbitrum gibi bazı ağlar bu API'yi desteklemez. Bu ağlardan birini indeksleyen bir subgraph, `call` filtresi olan bir veya daha fazla blok işleyici içeriyorsa, senkronizasyona başlatılmaz. - -Bir blok işleyicisi için filtre olmaması, işleyicinin her blok için çağrılacağı anlamına gelir. Bir veri kaynağı, her filtre türü için yalnızca bir blok işleyicisi içerebilir. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filtresi - -> **`specVersion` >= 0.0.8 gerektirir** - -> **Not:** Polling filtreleri yalnızca `kind: ethereum` olan dataSources üzerinde kullanılabilir. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -Tanımlanan işleyici her `n` blok için bir kez çağrılacaktır; burada `n`, `every` alanında sağlanan değerdir. Bu yapılandırma, subgraph'ın düzenli blok aralıklarında belirli işlemleri gerçekleştirmesini sağlar. - -#### Once Filtresi - -> **`specVersion` >= 0.0.8 gerektirir** - -> **Not:** Once filtreleri yalnızca `kind: ethereum` olan dataSources üzerinde kullanılabilir. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -Once filtresi ile tanımlanan işleyici, diğer tüm işleyiciler çalışmadan önce yalnızca bir kez çağrılacaktır. Bu yapılandırma, subgraph'ın işleyiciyi indekslemenin başlangıcında belirli görevleri yerine getirmesine olanak sağlayan bir başlatma işleyicisi olarak kullanmasına yarar. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Eşleştirme fonksiyonu - -Eşleştirme fonksiyonu, argümanı olarak yalnızca bir `ethereum.Block` alacaktırr. Olaylar için eşleştirme işlevleri gibi, bu işlev depodaki mevcut subgraph varlıklarına erişebilir, akıllı sözleşmeleri çağırabilir ve varlıkları oluşturabilir veya güncelleyebilir. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonim Olaylar - -Solidity'de anonim olayları işlemek gerekiyorsa, örnekte olduğu gibi, olayın topic 0'ını sağlayarak bunu başarabilirsiniz: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -Bir olay, yalnızca imza ve topic 0 eşleştiğinde tetiklenir. Varsayılan olarak `topic0`, olay imzasının hash değerine eşittir. - -## Olay İşleyicilerinde İşlem Makbuzları - -`specVersion` `0.0.5` ve `apiVersion` `0.0.7`'den itibaren olay işleyicileri, onları yayınlayan işlemin makbuzuna erişebilir. - -Bunun için olay işleyicileri, subgraph manifest dosyasında isteğe bağlı ve varsayılan olarak false olan yeni `receipt: true` anahtarını kullanarak belirtilmelidir. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -İşleyici işlevi içinde, makbuz `Event.receipt` alanında erişilebilir. Manifestte makbuz(`receipt`) anahtarı `false` olarak ayarlandığında veya atlandığında, `null` bir değer döndürülür. 
- -## Deneysel özellikler - -`specVersion` `0.0.4`'ten itibaren subgraph özellikleri, manifest dosyasının en üst düzeyindeki özellikler(`features`) bölümünde, aşağıdaki tabloda listelendiği gibi `camelCase` adlarıyla açıkça belirtilmelidir: - -| Özellik | İsim | -| --------------------------------------------------- | ---------------- | -| [Ölümcül Olmayan Hatalar](#non-fatal-errors) | `nonFatalErrors` | -| [Tam Metin Arama](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Graftlama](#grafting-onto-existing-subgraphs) | `grafting` | - -Örneğin, bir subgraph Tam Metin Arama(**Full-Text Search**) ve Ölümcül Olmayan Hatalar(**Non-fatal Errors**) özelliklerini kullanıyorsa, özellikler(`features`) alanı manifestte şöyle olmalıdır: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Bir özelliği bildirmeden kullanmanın, subgraph dağıtımı sırasında bir **validation error**'a yol açacağını, ancak bir özellik bildirilmiş ancak kullanılmamışsa herhangi bir hata oluşmayacağını unutmayın. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. 
Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -### Ölümcül Olmayan Hatalar - -Halihazırda senkronize edilmiş subgraphlarda indeksleme hataları varsayılan olarak subgraph başarısız olmasına ve senkronizasyonun durmasına neden olur. Hatalara rağmen senkronizasyonun devam etmesi için subgraphlar, hata tetikleyen işleyicinin yapılan değişikliklerini yok sayarak yapılandırılabilir. 
Bu, subgraph yazarlarının subgraphlarını düzeltmeleri için zaman kazandırırken, sorguların en son blokta sunulmaya devam etmesini sağlar, ancak hata nedeniyle sonuçlar tutarsız olabilir. Bazı hatalar hala her zaman ölümcül olacaktır. Ölümcül olmaması için hatanın belirlenmiş olması gerekmektedir. - -> **Not:** Graph Ağı, henüz ölümcül olmayan hataları desteklemiyor ve geliştiricilerin bu işlevselliği kullanarak subgraphları Studio aracılığıyla ağa dağıtması önerilmez. - -Ölümcül olmayan hataların etkinleştirilmesi, subgraph manifestinde aşağıdaki özellik bayrağının ayarlanmasını gerektirir: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -Sorgu, `subgraphError` argümanı aracılığıyla potansiyel tutarsızlıklarla sorgulama yapmak için de seçilebilir. Subgraph'ta hataların atlandığını kontrol etmek için `_meta`'yı sorgulamak da önerilir, örnekte olduğu gibi: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -Subgraph bir hatayla karşılaşırsa bu sorgu, hem verileri hem de `"indexing_error"` mesajıyla birlikte bir graphql hatasını döndürecektir, örnekte olduğu gibi: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Mevcut Subgraph'ta Graftlama - -> **Not:** Graph Ağı'na ilk yükseltme yapılırken graftlama kullanılması önerilmez. Daha fazla bilgi için[buraya](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network) bakın. - -Bir subgraph ilk olarak dağıtıldığında, ilgili zincirin başlangıç bloğundan (veya her veri kaynağı için belirlenen `startBlock`'tan) olayları indekslemeye başlar. Bazı durumlarda, mevcut bir subgraph'tan verilerin yeniden kullanılması ve çok daha sonraki bir blokta indekslemeye başlanması yararlı olabilir. Bu indeksleme yöntemi _Grafting_ olarak adlandırılır. 
Grafting, örneğin, eşleştirmelerdeki basit hataları hızlı bir şekilde geçmek veya bir subgraph başarısız olduktan sonra geçici olarak tekrar çalıştırmak için kullanışlıdır. - -Bir subgraph temel bir subgraph üzerine graft edildiğinde, `subgraph.yaml`'daki subgraph belirtimi en üst düzeyde bir `graft` bloğu içerir: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -Manifesti `graft` bloğu içeren bir subgraph dağıtıldığında, Graph Düğümü verilen bloğa(`block`) kadar olan temel`base` subgraph verilerini kopyalar ve ardından yeni subgraph'a o bloktan devam eder. Temel subgraph, hedef Graph Düğüm örneğinde mevcut olmalı ve en azından verilen bloka kadar indekslemiş olmalıdır. Bu kısıtlama nedeniyle, graftlama yalnızca geliştirme sırasında veya acil durumlarda, eşdeğer graftlanmamış bir subgraph oluşturmaya hız kazandırmak için kullanılmalıdır. - -Graftlama, temel verileri indekslemek yerine kopyaladığından, subgraph'ı istenen bloğa getirmek sıfırdan indekslemeye nazaran çok daha hızlıdır, ancak ilk veri kopyası çok büyük subgraphlar için yine birkaç saat sürebilir. Graftlanmış subgraph başlatılırken, Graph Düğümü halihazırda kopyalanmış olan varlık türleri hakkında bilgileri kaydedecektir. - -Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu olan bir GraphQL şeması kullanabilir. 
Kendi başına geçerli bir subgraph şeması olmalıdır, ancak şu şekillerde temel subgraph şemasından sapabilir: - -- Varlık türlerini ekler veya kaldırır -- Varlık türlerinden öznitelikleri kaldırır -- Varlık türlerine null yapılabilir öznitelikler ekler -- Null yapılamayan öznitelikleri null yapılabilir özniteliklere dönüştürür -- Numaralandırmalara değerler ekler -- Arayüzleri ekler veya kaldırır -- Arayüzün hangi varlık türleri için uygulandığını değiştirir - -> **[Özellik Yönetimi](#experimental-features):** graftlama(`grafting`) subgraph manifestindeki özellikler(`features`) altında bildirilmelidir. - -## IPFS/Arweave File Data Sources - -Dosya veri kaynakları, indeksleme sırasında zincir dışı verilere sağlam ve genişletilebilir bir şekilde erişmek için yeni bir subgraph fonksiyonudur. Dosya veri kaynakları IPFS'den ve Arweave'den dosya getirmeyi desteklemektedir. - -> Bu aynı zamanda zincir dışı verilerinin belirlenebilir indekslenmesi için zemin hazırlar ve keyfi HTTP kaynaklı verilerin tanıtılma potansiyelini de beraberinde getirir. - -### Genel Bakış - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> Bu, mevcut `ipfs.cat` API'sinin yerini alır - -### Yükseltme rehberi - -#### `graph-ts` ve `graph-cli`'yi güncelleyin - -Dosya veri kaynakları, graph-ts >=0.29.0 and graph-cli >=0.33.1 gerektirir - -#### Dosyalar bulunduğunda güncellenecek yeni bir varlık türü ekleyin - -Dosya veri kaynakları zincir tabanlı varlıklara erişemez veya bunları güncelleyemez, ancak dosya belirli varlıkları güncellemelidir. 
- -Bu, mevcut varlıklardaki alanları ayrı varlıklara bölmeyi gerektirebilir ve bunlar birbirine bağlanabilir. - -Özgün birleştirilmiş varlık: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -Yeni, ayrılmış varlık: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -Ana varlık ve sonuç dosya veri kaynak varlığı arasındaki ilişki bire bir ise, en basit kalıp, IPFS CID'yi arama anahtarı olarak kullanarak ana varlığını sonuç dosya varlığına bağlamaktır. Yeni dosya tabanlı varlıklarınızın modellemesiyle ilgili sorun yaşarsanız Discord üzerinden iletişime geçin! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### `kind: file/ipfs` veya `kind: file/arweave` ile yeni bir şablonlu veri kaynağı ekleyin - -Bu, ilgi alanı dosyası tespit edildiğinde oluşturulacak veri kaynağıdır. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Şu anda `abis` gerekli olsa da, dosya veri kaynaklarından sözleşmeleri çağırmak mümkün değildir - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### Dosyaları işlemek için yeni bir işleyici oluşturun - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -Dosyanın okunabilir bir dize olarak CID'sine `dataSource` aracılığıyla şu şekilde erişilebilir: - -```typescript -const cid = dataSource.stringParam() -``` - -Örnek işleyici: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Gerektiğinde dosya veri kaynakları oluşturun - -Artık zincir tabanlı işleyicilerin yürütülmesi sırasında dosya veri kaynakları oluşturabilirsiniz: - -- Otomatik olarak oluşturulmuş şablonları(`templates`) içe aktarın -- cid'nin IPFS veya Arweave için geçerli içerik tanımlayıcısı olduğu bir eşleştirme içinden `TemplateName.create(cid: string)` öğesini çağırın - -Graph Düğümü, IPFS için [v0 ve v1 içerik tanımlayıcılarını](https://docs.ipfs.tech/concepts/content-addressing/), ve dizinli içerik tanımlayıcılarını desteklemektedir. 
(örneğin `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Örnek: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//Bu örnek kod, bir Crypto coven subgraph'ı içindir. Yukarıdaki ipfs hash'ı, tüm kripto NFT'leri için token üst verilerine sahip bir dizindir. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //Bu, tek bir Crypto coven NFT için üst verilere giden bir yol oluşturur. Dizini "/" + dosya adı + ".json" ile birleştirir. - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -Bu, Graph Düğümü'nün yapılandırılmış IPFS veya Arweave uç noktasını sorgulayacak yeni bir veri kaynağı dosyası oluşturacak ve bulunamazsa yeniden deneyecek. Dosya bulunduğunda, dosya veri kaynağı işleyicisi çalıştırılacaktır. - -Bu örnek, ana `Token` varlığı ile sonuç `TokenMetadata` varlığı arasındaki arama olarak CID'i kullanmaktadır. 
- -> Bu, daha önce bir subgraph geliştiricisi'nin `ipfs.cat(CID)` çağrısını yaparak dosyayı aldığı noktadır - -Tebrikler, dosya veri kaynaklarını kullanıyorsunuz! - -#### Subgraph'ınızı dağıtma - -Artık subgraph'ınızı oluşturabilir(`build`) ve herhangi bir Graph Düğümüne >=v0.30.0-rc.0 dağıtabilirsiniz(`deploy`). - -#### Sınırlamalar - -Dosya veri kaynağı işleyicileri ve varlıkları yürütüldüklerinde belirleyici olmaları ve zincir tabanlı veri kaynaklarının bozulmasını önlemeleri için, diğer subgraph varlıklarından izole edilir,. Açıkça şunlardır: - -- Dosya Veri Kaynakları tarafından oluşturulan varlıklar değiştirilemez ve güncellenemez -- Dosya Veri Kaynağı işleyicileri, diğer dosya veri kaynaklarından varlıklara erişemez -- Dosya Veri Kaynaklarıyla ilişkili varlıklara zincir tabanlı işleyicilerden erişilemez - -> Bu kısıtlama çoğu kullanım durumu için sorun oluşturmamalıdır, ancak bazı durumlarda karmaşıklıklığa sebep olabilir. Dosya tabanlı verilerinizi bir subgraph'ta modellemekte zorluk yaşarsanız, lütfen Discord üzerinden bizimle iletişime geçin! - -Ek olarak, zincir üstü bir veri kaynağı veya başka bir dosya veri kaynağı olsun, bir dosya veri kaynağından veri kaynakları oluşturmak mümkün değildir. Bu kısıtlama gelecekte kaldırılabilir. - -#### En iyi uygulamalar - -NFT meta verilerini ilgili tokenleri bağlarken, Üst veri varlığına Token varlığından başvurmak için üst verinin IPFS hash değerini kullanın. Üst veri varlığını IPFS hash değerini bir kimlik olarak kullanarak kaydedin. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -Birden çok kez yenilenen varlıklarınız varsa, IPFS hash değeri & varlık kimliğini kullanarak benzersiz dosya tabanlı varlıklar oluşturun ve bunları zincir tabanlı bir varlıkta türetilmiş alanda referans gösterin. 
- -> Yukarıdaki öneriyi geliştirmeye çalışıyoruz, bu nedenle sorgular yalnızca "en son" sürümü döndürür - -#### Bilinen Sorunlar - -Dosya veri kaynakları şu anda ABI'leri gerektirir, ancak ABI'ler kullanılmaz ([github issue](https://github.com/graphprotocol/graph-cli/issues/961)). Geçici çözüm, herhangi bir ABI eklemektir. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Örnekler - -[Crypto Coven Subgraph taşınması](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### Referanslar - -[GIP Dosyası Veri Kaynakları](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/tr/developing/creating-a-subgraph/_meta.js b/website/pages/tr/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/tr/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/tr/developing/graph-ts/_meta.js b/website/pages/tr/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/tr/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/tr/managing/deprecate-a-subgraph.mdx b/website/pages/tr/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/tr/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph 
Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/tr/mips-faqs.mdx b/website/pages/tr/mips-faqs.mdx deleted file mode 100644 index da1e9c76231c..000000000000 --- a/website/pages/tr/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Giriş - -> Not: MIPs programı Mayıs 2023 itibariyle kapanmıştır. Katılan tüm İndeksleyicilere teşekkür ederiz! - -Graph ekosistemine katılmak için heyecan verici bir zaman! Yaniv Tal, Graph Day 2022](https://thegraph.com/graph-day/2022/) sırasında Graph ekosisteminin uzun yıllardır üzerinde çalıştığı bir an olan [barındırılan hizmetin kullanımdan kaldırılacağını](https://thegraph.com/blog/sunsetting-hosted-service/) duyurdu. - -Barındırılan hizmetin kullanımdan kaldırılması ve tüm faaliyetlerinin merkeziyetsiz ağa taşınmasını desteklemek için Graph Vakfı [Geçiş Altyapısı Sağlayıcıları (crwd)lbracketdwrcMIPs programını] (https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program) duyurdu. 
- -MIPs programı, Ethereum ana ağının dışındaki zincirleri indekslemek ve Graph protokolü'nün merkeziyetsiz ağı çok zincirli bir altyapı katmanına genişletmesine yardımcı olmak için kaynaklarla İndeksleyicilere desteklemeyi amaçlayan bir teşvik programıdır. - -MIPs programı, GRT arzının %0,75'inin (75 milyon GRT), %0,5'ini ağın önyüklenmesine katkıda bulunan İndeksleyicileri ödüllendirmek ve %0,25'ini çok zincirli subgraphler kullanan subgraph geliştiricileri için Ağ Hibelerine tahsis etmiştir. - -### Yararlı Kaynaklar - -- [Vincent (Victor) Taglia'dan İndeksleyici 2ools](https://indexer-2ools.vincenttaglia.com/#/) -- [Graph Ağı'nda Nasıl Etkili Bir İndeksleyici Olunur?](https://thegraph.com/blog/how-to-become-indexer/) -- [İndeksleyici Bilgi Merkezi](https://thegraph.academy/indexers/) -- [Tahsis Optimizatörü](https://github.com/graphprotocol/allocationopt.jl) -- [Tahsis Optimizasyon Aracı](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Bir subgraph başarısız olsa bile geçerli bir indeksleme kanıtı (POI) oluşturmak mümkün müdür? - -Evet, gerçekten de öyle. - -Bağlam için, tahkim tüzüğü [tüzük hakkında daha fazla bilgiyi buradan edinebilirsiniz] (https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), başarısız bir subgraph için POI oluşturma metodolojisini belirtir. - -Topluluk üyelerinden [SunTzu](https://github.com/suntzu93), bu süreci tahkim tüzüğünün metodolojisine uygun olarak otomatikleştirmek için bir script oluşturdu. Github deposuna göz atın [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. MIPs programı ilk olarak hangi zinciri teşvik edecek? - -Merkeziyetsiz ağ üzerinde desteklenecek ilk zincir Gnosis Zinciri! Eskiden xDAI olarak bilinen Gnosis Zinciri, EVM tabanlı bir zincirdir. Gnosis Zinciri, çalışan düğümlerin kullanıcı dostu olması, İndeksleyici hazırlığı, Graph ile uyumu ve web3 içinde benimsenmesi dolayısıyla ilk olarak seçilmiştir. - -### 3. 
MIPs programına yeni zincirler nasıl eklenecek? - -Yeni zincirler, MIPs programı boyunca İndeksleyici hazırlığı, talep ve topluluk görüşüne dayalı olarak duyurulacaktır. Zincirler ilk olarak test ağında desteklenecek ve daha sonra bu zinciri ana ağda desteklemek için bir GIP başarılı şekilde geçecektir. MIPs programına katılan indeksleyiciler hangi zincirleri desteklemek istediklerini seçecek ve subgraphlar'a hizmet vererek ağda sorgu ücretleri ve indeksleme ödülleri kazanmanın yanı sıra zincir başına ödüller kazanacaklardır. MIPs katılımcıları performanslarına, ağ ihtiyaçlarına hizmet etme becerilerine ve topluluk desteğine göre puanlanacaktır. - -### 4. Ağın yeni bir zincir için hazır olduğunu nasıl anlayacağız? - -Graph Vakfı, hazır olma durumunu en iyi şekilde değerlendirmek adına QoS performans ölçümlerini, ağ performansını ve topluluk kanallarını izleyecektir. Öncelik, çok zincirli merkeziyetsiz uygulamaların subgraphlar'ını geçirebilmeleri için ağın performans ihtiyaçlarını karşılamasını sağlamaktır. - -### 5. Ödüller zincir başına nasıl paylaştırılır? - -Zincirlerin düğümleri senkronize etme gereksinimleri, sorgu hacmi ve benimseme açısından farklılık gösterdiği göz önüne alındığında, tüm geri bildirimlerin ve öğrenimlerin elde edilmesini sağlamak için zincir başına ödüller o zincirin döngüsünün sonunda kararlaştırılacaktır. Aynı zamanda İndeksleyiciler, zincir ağ üzerinde desteklendiğinde sorgu ücretleri ve indeksleme ödülleri de kazanabileceklerdir. - -### 6. MIPs programındaki tüm zincirleri indekslememiz mi gerekiyor yoksa sadece bir zincir seçip onu indeksleyebilir miyiz? - -İstediğiniz zinciri indeksleyebilirsiniz! MIPs programının amacı, İndeksleyicileri istedikleri zincirleri indekslemeleri ve ilgilendikleri web3 ekosistemlerini desteklemeleri için ihtiyaç duydukları araç ve bilgilerle donatmaktır. Bununla birlikte, her zincir için test ağından ana ağa kadar aşamalar bulunmaktadır. 
İndekslediğiniz zincirler için tüm aşamaları tamamladığınızdan emin olun. Aşamalar hakkında daha fazla bilgi edinmek için [MIPs notion sayfasına] (https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) göz atın. - -### 7. Ödüller ne zaman dağıtılacak? - -MIPs ödülleri zincir başına, performans ölçütleri karşılandığında ve geçirilen subgraphlar bu İndeksleyiciler tarafından desteklendiğinde dağıtılacaktır. Zincir döngüsünün ortalarında zincir başına toplam ödüller hakkında bilgi almaya bakın. - -### 8. Puanlama nasıl işliyor? - -İndeksleyiciler, program boyunca puanlamaya dayalı ödüller için liderlik tablosunda yarışacaklardır. Program puanlaması aşağıdakilere göre yapılacaktır: - -**Subgraph Kapsamı** - -- Zincir başına subgraphlar için maksimum destek sağlıyor musunuz? - -- MIP'ler sırasında, büyük İndeksleyicilerin destekledikleri zincir başına subgraphlar'ın %50'sinden fazlasını stake etmeleri beklenir. - -**Hizmet Kalitesi** - -- İndeksleyici zincire iyi bir Hizmet Kalitesi (gecikme süresi, güncel veri, çalışma süresi, vb.) ile hizmet veriyor mu? - -- İndeksleyici, merkeziyetsiz uygulama geliştiricilerinin ihtiyaçlarına karşı reaktif olmalarını destekliyor mu? - -İndeksleyici ağın genel sağlığına katkıda bulunarak verimli bir şekilde tahsis ediyor mu? - -**Topluluk Desteği** - -- İndeksleyici, diğer İndeksleyicilerle çoklu zincire hazırlanmalarına yardımcı olmak için işbirliği yapıyor mu? - -- İndeksleyici program boyunca çekirdek geliştiricilere geri bildirim sağlıyor mu veya Forum'daki İndeksleyicilerle bilgi paylaşıyor mu? - -### 9. Discord rolü nasıl verilecek? - -Moderatörler önümüzdeki birkaç gün içinde rolleri verecektir. - -### 10. Programı bir test ağı üzerinde başlatmak ve daha sonra ana ağa geçmek sorun olur mu? Düğümümü tanımlayabilecek ve ödülleri dağıtırken dikkate alabilecek misiniz? - -Evet, aslında sizden bunu yapmanız bekleniyor. Birkaç aşama Görli'de ve bir tanesi de ana ağda. - -### 11. 
Katılımcıların hangi noktada bir ana ağ dağıtımı eklemesini bekliyorsunuz? - -Aşama 3 sırasında bir ana ağ indeksleyicisine sahip olma gereksinimi olacaktır. Bu konuda daha fazla bilgi [yakında bu notion sayfasında paylaşılacaktır.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Ödüller hak edişe(vesting) tabi olacak mı? - -Programın sonunda dağıtılacak yüzde, hak edişe tabi olacaktır. Bu konuda daha fazla bilgi İndeksleyici Sözleşmesinde paylaşılacaktır. - -### 13. Birden fazla üyesi olan ekipler için, tüm ekip üyelerine bir MIPs Discord rolü verilecek mi? - -Evet - -### 14. MIPs test ağına katılmak için graph küratör programından kilitli tokenleri kullanmak mümkün mü? - -Evet - -### 15. MIPs programı sırasında, geçersiz POI'ye itiraz etmek için bir süre olacak mı? - -Henüz kararlaştırılmadı. Daha fazla bilgi için lütfen bu sayfayı düzenli aralıklarla ziyaret edin veya talebiniz acilse lütfen info@thegraph.foundation adresine e-posta gönderin - -### 17. İki hakediş sözleşmesini birleştirebilir miyiz? - -Hayır. Seçenekler şunlardır: Birini diğerine devredebilir veya iki ayrı indeksleyici çalıştırabilirsiniz. - -### 18. KYC Soruları? - -Lütfen info@thegraph.foundation adresine e-posta gönderin - -### 19. Gnosis zincirini indekslemeye hazır değilim, hazır olduğumda başka bir zincirden ona geçip indekslemeye başlayabilir miyim? - -Evet - -### 20. Sunucuları çalıştırmak için önerilen bölgeler var mı? - -Bölgeler hakkında tavsiyelerde bulunmuyoruz. Konum seçerken, kripto para birimleri için büyük pazarların nerede olduğunu göz önünde bulundurmayı düşünebilirsiniz. - -### 21. "İşleyici gas maliyeti" nedir? - -Bir işleyiciyi yürütmenin maliyetinin deterministik ölçüsüdür. Adından da anlaşılabileceği gibi, blok zincirlerindeki gas maliyetiyle ilgisi yoktur. 
diff --git a/website/pages/tr/querying/_meta.js b/website/pages/tr/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/tr/querying/_meta.js +++ b/website/pages/tr/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/tr/querying/graph-client/_meta.js b/website/pages/tr/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/tr/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/uk/_meta.js b/website/pages/uk/_meta.js index c5052c7b7719..f2f3b56163a5 100644 --- a/website/pages/uk/_meta.js +++ b/website/pages/uk/_meta.js @@ -1,21 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - network: 'Graph мережа', - '###1': { - type: 'heading', - title: 'Підграфи', - }, - developing: 'Розробка', - deploying: 'Запуск', - publishing: 'Публікація', - managing: 'Управління', - querying: 'Запити', - cookbook: 'Книга поетапних порад', - 'release-notes': 'Примітки до релізів та інструкції по оновленню', - '###3': { - type: 'heading', - title: 'Індексація', - }, + ...meta, } diff --git a/website/pages/uk/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/uk/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/uk/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. 
To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. 
- -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. 
- -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." 
-} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. 
On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. 
- -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. 
diff --git a/website/pages/uk/deploying/hosted-service.mdx b/website/pages/uk/deploying/hosted-service.mdx deleted file mode 100644 index 82afd7c2c5dd..000000000000 --- a/website/pages/uk/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Створення субграфа - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). 
diff --git a/website/pages/uk/deploying/subgraph-studio.mdx b/website/pages/uk/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/uk/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. - -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. 
- -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/uk/developing/creating-a-subgraph.mdx b/website/pages/uk/developing/creating-a-subgraph.mdx deleted file mode 100644 index 972ca98361ce..000000000000 --- a/website/pages/uk/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. - -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Короткий огляд - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! 
- tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. 
This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/uk/developing/creating-a-subgraph/_meta.js b/website/pages/uk/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/uk/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/uk/developing/graph-ts/_meta.js b/website/pages/uk/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/uk/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/uk/managing/deprecate-a-subgraph.mdx b/website/pages/uk/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/uk/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/uk/mips-faqs.mdx b/website/pages/uk/mips-faqs.mdx deleted file mode 100644 index 7f39862c23ec..000000000000 --- a/website/pages/uk/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Введення - -> Примітка: програма MIPs закрита з травня 2023 року. Дякуємо всім індексаторам, які взяли участь! - -Це чудовий час для того, щоб взяти участь в екосистемі The graph. Протягом [Graph Day 2022] (https://thegraph.com/graph-day/2022/) Yaniv Tal анонсував [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), момент, для якого екосистема The Graph працювала протягом багатьох років. - -Щоб підтримати завершення роботи хостингового сервісу та перенесення всієї активності в децентралізовану мережу, The Graph Foundation оголосив про [Migration Infrastructure Providers (crwd)lbracketdwrcMIPs program] (https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -Програма MIPs - це оплачувана програма для Індексаторів, яка надає їм необхідні ресурси для індексації різних мереж, за межами мережі Ethereum і допомагає протоколу The Graph розширити децентралізовану мережу до рівня мультичейн інфраструктури. 
- -На програму MIPs виділено 0.75% від загальної кількості токенів GRT (75 мільйонів GRT), з яких 0.5% буде використано для нагороди Індексаторів, які роблять свій вклад на бутстрап мережі та 0.25% зарезервовані під Network Grants для розробників підграфів, які використовують мультичейн підграфи. - -### Корисні посилання - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Чи можна згенерувати дійсний доказ індексації (POI), навіть у тому випадку, якщо підграф виявився невдалим? - -Так, це правда. - -Для довідки, arbitration charter \[дізнатися більше про charter можете тут (https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) визначає методологію генерації POI для невдалого підграфа. - -Учасник спільноти [SunTzu] (https://github.com/suntzu93) створив скрипт для автоматизації цього процесу відповідно до методології arbitration charter. Ознайомтеся з ним [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Яка мережа буде стимулюватись програмою MIPs в першу чергу? - -Перша мережа, яка буде підтримуватись в децентралізованій мережі - це Gnosis Chain! Раніше відома як xDAI, Gnosis Chain - це мережа на основі EVM. Gnosis Chain була обрана першою з огляду на зручність запуску нод, готовність до роботи з індексатором, узгодженість з The Graph та web3 адаптацію. - -### 3. Як нові мережі будуть додаватись до програми MIPs? - -Нові мережі будуть анонсовані протягом програми MIPs, відповідно до готовності Індексаторів, попиту та загального настрою спільноти. 
Мережі спочатку будуть працювати в тестнеті, а після цього GIP буде передано для підтримки цієї мережі в мейннеті. Індексатори, які беруть участь в програмі MIPs, будуть обирати, в підтримці якої мережі вони зацікавлені та отримуватимуть нагороди за кожну мережу, додатково до винагород, які вони вже отримують за запити та винагород за індексацію в мережі за обслуговування підграфів. Учасники програми MIPs будуть оцінюватися на основі їх ефективності, здатності обслуговувати потреби мережі та підтримки з боку спільноти. - -### 4. Як ми дізнаємось, коли мережа буде готова до додання нових блокчейнів? - -The Graph Foundation відстежуватиме показники якості обслуговування, продуктивність мережі та різні канали спільноти, щоб найкращим чином оцінити готовність. Пріоритетом є забезпечення того, щоб мережа відповідала вимогам продуктивності для тих мультичейн додатків, які зможуть перенести свої підграфи. - -### 5. Як розподіляються винагороди на кожну мережу? - -Враховуючи, що мережі відрізняються за своїми вимогами до синхронізаційних нод, а також за обсягом запитів і прийняттям, винагорода для кожної мережі буде визначатися в кінці її циклу, щоб гарантувати, що весь зворотний зв'язок і навчання будуть враховані. Однак індексатори завжди зможуть заробляти плату за запити та винагороду за індексацію, якщо ланцюжок буде підтримуватися в мережі. - -### 6. Чи потрібно індексувати всі мережі в програмі MIPs, чи можна вибрати лише одну і проіндексувати її? - -Ви можете індексувати будь-яку мережу, яка вам подобається! Мета програми MIPs - надати індексаторам інструменти та знання для індексування мереж, які вони бажають, і підтримки тих екосистем web3, які їх цікавлять. Однак, для кожної мережі є фази, починаючи від тестової мережі до мейннету. Переконайтеся, що ви пройшли всі ці фази для мереж, які ви індексуєте. Дивіться [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059), щоб дізнатись більше про ці фази. - -### 7. 
Коли будуть розподілені нагороди? - -Винагороди MIPs будуть розподілені між мережами, як тільки будуть досягнуті показники ефективності та перенесені підграфи будуть підтримуватися цими індексаторами. Шукайте інформацію про загальну кількість винагород для кожної мережі в середині її циклу. - -### 8. Як працює система підрахунку? - -Індексатори змагатимуться за нагороди на основі набраних балів в таблиці лідерів протягом програми. Підрахунок балів за програму базуватиметься на: - -**Покриття підграфів** - -- Чи забезпечується максимальна підтримка підграфів у мережі? - -- Очікується, що під час MIP великі Індексатори стейкатимуть 50%+ підграфів у кожній мережі, яку вони підтримують. - -**Якість обслуговування** - -- Чи забезпечує індексатор хорошу якість обслуговування мережі (затримки, свіжість даних, час безвідмовної роботи тощо)? - -- Чи реагує індексатор, що підтримує розробників додатків, на їхні потреби? - -Чи ефективно розподіляє ресурси Індексатор, сприяючи загальному стану мережі? - -**Підтримка спільноти** - -- Чи співпрацює Індексатор з іншими Індексаторами, щоб допомогти їм налаштуватися на мультичейн роботу? - -- Чи надає Індексатор зворотний зв'язок основним розробникам протягом програми або ділиться інформацією з іншими Індексаторами на Форумі? - -### 9. Як будуть розподілятись ролі в Discord? - -Модератори розподілять ролі протягом наступних кількох днів. - -### 10. Чи можна почати програму на тестовій мережі, а потім перейти в мейннет? Чи зможете ви ідентифікувати мою ноду і врахувати її при розподілі винагород? - -Так, саме це вам і потрібно зробити. Кілька фаз знаходяться на Görli та одна - в мейннеті. - -### 11. На якому етапі учасники розпочнуть розгортання в мейннеті, відповідно до ваших очікувань? - -Під час третьої фази буде вимагатися наявність індексатора в основній. 
Більше інформації про це ви можете отримати тут [скоро буде опубліковано на сторінці у Notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Чи будуть винагороди розблоковуватись поступово? - -Відсоток, який буде розподілений наприкінці програми, підлягатиме поступовому розблокуванню. Більше інформації про це буде надано в Indexer Agreement. - -### 13. Для команд з більш ніж одним учасником, чи всі члени команди отримають роль MIPs Discord? - -Так - -### 14. Чи можна використовувати заблоковані токени з програми кураторів The Graph для участі в тестнеті MIPs? - -Так - -### 15. Чи буде наданий період для оскарження недійсних POI під час програми MIPs? - -Це буде вирішено пізніше. Будь ласка, періодично повертайтеся на цю сторінку для отримання більш детальної інформації або, якщо ваш запит є терміновим, напишіть на пошту info@thegraph.foundation - -### 17. Чи можна об'єднати два вестинг контракти? - -Ні. Варіанти такі: ви можете делегувати один індексатор іншому або запустити два окремих індексатори. - -### 18. Питання по KYC? - -Будь ласка напишіть на пошту info@thegraph.foundation - -### 19. Я не готовий індексувати в мережі Gnosis, чи можу я пропустити цей етап і почати індексування з іншої мережі, коли буду готовий? - -Так - -### 20. Чи є рекомендовані регіони для запуску серверів? - -Ми не даємо рекомендацій щодо регіонів. Обираючи місце розташування, ви можете подумати про те, де знаходяться основні ринки криптовалют. - -### 21. Що таке "вартість газу для обслуговування"? - -Це детермінована міра вартості виконання обслуговування. Всупереч тому, що може здатися з назви, це ніяк не пов'язано з вартістю газу в блокчейні. 
diff --git a/website/pages/uk/network/_meta.js b/website/pages/uk/network/_meta.js index 19d6fd4679e4..49858537c885 100644 --- a/website/pages/uk/network/_meta.js +++ b/website/pages/uk/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - benefits: 'Переваги', } diff --git a/website/pages/uk/querying/_meta.js b/website/pages/uk/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/uk/querying/_meta.js +++ b/website/pages/uk/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/uk/querying/graph-client/_meta.js b/website/pages/uk/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/uk/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ur/_meta.js b/website/pages/ur/_meta.js index eefd252149dd..f2f3b56163a5 100644 --- a/website/pages/ur/_meta.js +++ b/website/pages/ur/_meta.js @@ -1,21 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - network: 'گراف نیٹورک', - '###1': { - type: 'heading', - title: 'سبگراف', - }, - developing: 'ترقی پذیر', - deploying: 'تعیناتی', - publishing: 'اشاعت', - managing: 'انتظام', - querying: 'استفسار کرنا', - cookbook: 'ہدایت نامہ', - 'release-notes': 'ریلیز نوٹس اور اپ گریڈ گائیڈز', - '###3': { - type: 'heading', - title: 'انڈیکسنگ', - }, + ...meta, } diff --git a/website/pages/ur/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/ur/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 0febe076c1b2..000000000000 --- a/website/pages/ur/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: سب گراف کو ہوسٹڈ سروس پر 
تعینات کرنا ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## ایکسیس ٹوکن سٹور کریں - -اکاؤنٹ بنانے کے بعد، اپنے [ڈیش بورڈ](https://thegraph.com/hosted-service/dashboard) پر جائیں۔ ڈیش بورڈ پر دکھائے گئے رسائی ٹوکن کو کاپی کریں اور `graph auth --product hosted-service ` چلائیں۔ یہ ایکسیس ٹوکن کو آپ کے کمپیوٹر پر محفوظ کر دے گا۔ آپ کو یہ صرف ایک بار کرنے کی ضرورت ہے، یا اگر آپ کبھی ایکسیس ٹوکن کو دوبارہ تخلیق کرتے ہیں. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**تصویر** - سب گراف کے لیے پیش نظارہ تصویر اور تھمب نیل کے طور پر استعمال کرنے کے لیے ایک تصویر منتخب کریں. 
- -**سب گراف کا نام** - اکاؤنٹ کے نام کے ساتھ جس کے تحت سب گراف بنایا گیا ہے، یہ `account-name/subgraph-name`-سٹائل نام کی بھی وضاحت کرے گا جو تعیناتیوں اور گراف کیو ایل اینڈ پوائنٹس کے لیے استعمال ہوتا ہے۔ _اس فیلڈ کو بعد میں تبدیل نہیں کیا جاسکتا۔_ - -**اکاونٹ** - وہ اکاؤنٹ جس کے تحت سب گراف بنایا گیا ہے۔ یہ کسی فرد یا تنظیم کا اکاؤنٹ ہو سکتا ہے۔ _سب گرافس کو بعد میں اکاؤنٹس کے درمیان منتقل نہیں کیا جا سکتا۔_ - -**سب ٹائٹل** - متن جو سب گراف کارڈز میں ظاہر ہوگا. - -**تفصیل** - سب گراف کی تفصیل، سب گراف کی تفصیلات کے صفحہ پر دکھائی دیتی ہے. - -**گٹ ہب URL** - گٹ ہب پر سب گراف ریپوزٹری سے لنک کریں. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -آپ `yarn deploy` چلا کر سب گراف کو تعینات کرتے ہیں - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -ایک بار جب گراف نوڈ تاریخی بلاکس سے تمام ڈیٹا نکال لیتا ہے تو سب گراف کی حیثیت `Synced` میں بدل جاتی ہے۔ گراف نوڈ آپ کے سب گراف کے لیے بلاکس کا معائنہ کرتا رہے گا کیونکہ ان بلاکس کی کان کنی کی گئی ہے. 
- -## سب گراف کو دوبارہ سے تعینات کرنا - -اپنے سب گراف کی تعریف میں تبدیلی کرتے وقت، مثال کے طور پر، ہستی میپنگ میں کسی مسئلے کو حل کرنے کے لیے، اپنے سب گراف کے اپ ڈیٹ شدہ ورژن کو تعینات کرنے کے لیے اوپر `yarn deploy` کمانڈ دوبارہ چلائیں۔ سب گراف کی کسی بھی اپڈیٹ کے لیے ضروری ہے کہ گراف نوڈ آپ کے پورے سب گراف کو دوبارہ ترتیب دے، دوبارہ جینیسس بلاک سے شروع ہو. - -اگر آپ کا پہلے سے تعینات کردہ سب گراف اب بھی حالت میں ہے `Syncing`، تو اسے فوری طور پر نئے تعینات کردہ ورژن سے بدل دیا جائے گا۔ اگر پہلے سے تعینات کیا گیا سب گراف پہلے سے ہی مکمل طور پر مطابقت پذیر ہے، تو گراف نوڈ نئے تعینات شدہ ورژن کو `Pending Version` کے بطور نشان زد کرے گا، اسے بیک گراؤنڈ میں ہم آہنگ کرے گا، اور ایک بار مطابقت پذیر ہونے کے بعد صرف موجودہ تعینات کردہ ورژن کو نئے سے بدل دے گا۔ نیا ورژن ختم ہو گیا ہے. یہ یقینی بناتا ہے کہ نیا ورژن مطابقت پذیر ہونے کے دوران آپ کے پاس کام کرنے کے لیے ایک سب گراف موجود ہے. - -## سب گراف کو متعدد نیٹ ورکس پر تعینات کرنا - -کچھ معاملات میں، آپ ایک ہی سب گراف کو متعدد نیٹ ورکس پر اس کے تمام کوڈ کی نقل کیے بغیر تعینات کرنا چاہیں گے۔ اس کے ساتھ آنے والا بنیادی چیلنج یہ ہے کہ ان نیٹ ورکس پر کنٹریکٹ ایڈریس مختلف ہیں. - -### Graph-cli کا استعمال کرنا - -دونو `graph build` (جب سے `v0.29.0`) اور `graph deploy` (جب سے `v0.32.0`) دو نئے اختیارات قبول کرتے ہیں: - -```sh -اختیارات: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -آپ آسانی سے اپ ڈیٹ کرنے کے لیے ایک `json` معیاری فائل (ڈیفالٹ `networks.json`) سے نیٹ ورک کنفیگریشن کی وضاحت کرنے کے لیے `--network` ترقی کے دوران اپنے سب گراف کو آسانی سے اپ ڈیٹ کرنے کے لیے. - -**نوٹ:** `init` کمانڈ اب فراہم کردہ معلومات کی بنیاد پر ایک `networks.json` خود بخود تیار کرے گی۔ اس کے بعد آپ موجودہ کو اپ ڈیٹ کرنے یا اضافی نیٹ ورکس کو شامل کرنے کے قابل ہو جائیں گے. 
- -اگر آپ کے پاس `networks.json` فائل نہیں ہے، تو آپ کو دستی طور پر درج ذیل ڈھانچے کے ساتھ ایک فائل بنانے کی ضرورت ہوگی: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**نوٹ:** آپ کو تشکیل فائل میں کسی بھی `templates` (اگر آپ کے پاس کوئی ہے) کی وضاحت کرنے کی ضرورت نہیں ہے، صرف `dataSources`۔ اگر `subgraph.yaml` فائل میں کوئی بھی `templates` کا اعلان کیا گیا ہے، تو ان کا نیٹ ورک خود بخود `--network` اختیار کے ساتھ مخصوص کردہ سے اپ ڈیٹ ہوجائے گا. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -آپ کے نیٹ ورکس کی تشکیل فائل کو اس طرح نظر آنا چاہئے: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -اب ہم زیل میں دی گئ کمانڈز میں سے ایک چلا سکتے ہیں: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -اب آپ `yarn deploy` کے لیے تیار ہیں. - -**نوٹ:** جیسا کہ پہلے ذکر کیا گیا ہے، `graph-cli 0.32.0` کے بعد سے آپ براہ راست `yarn deploy` کو چلا سکتے ہیں `--network` کے اختیار سے: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Subgraph.yaml ٹیمپلیٹ استعمال کرنا - -پرانے گراف-کلی ورژن کے لیے ایک حل جو کنٹریکٹ ایڈریس جیسے پہلوؤں کو پیرامیٹرائز کرنے کی اجازت دیتا ہے یہ ہے کہ [مس ٹیش](https://mustache.github.io/) جیسے ٹیمپلیٹنگ سسٹم کا استعمال کرتے ہوئے اس کے کچھ حصے بنانا [ہینڈل بارز](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -اور - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -اس کے ساتھ، آپ مینی فیسٹ میں نیٹ ورک کے نام اور پتے کو متغیر پلیس ہولڈرز `{{network}}` اور `{{address}}` سے بدل دیں گے اور مینی فیسٹ کا نام بدل کر مثال کے طور پر رکھیں گے. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -کسی بھی نیٹ ورک پر مینی فیسٹ بنانے کے لیے، آپ `package.json` میں دو اضافی کمانڈز شامل کر سکتے ہیں اور ساتھ ہی `mustache` پر انحصار: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -اس کی عملی مثال [یہاں](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759) مل سکتی ہے. - -**نوٹ:** یہ نقطہ نظر زیادہ پیچیدہ حالات پر بھی لاگو کیا جا سکتا ہے، جہاں کنٹریکٹ ایڈریسز اور نیٹ ورک کے ناموں سے زیادہ کو تبدیل کرنا ضروری ہے یا جہاں ٹیمپلیٹس سے میپنگ یا ABIs تیار کرنا بھی ضروری ہے. - -## سب گراف کی صحت کی جانچ کرنا - -اگر ایک سب گراف کامیابی کے ساتھ مطابقت پذیر ہوتا ہے، تو یہ ایک اچھی علامت ہے کہ یہ ہمیشہ کے لیے اچھی طرح چلتا رہے گا۔ تاہم، نیٹ ورک پر نئے محرکات آپ کے سب گراف کو بغیر جانچ کی خرابی کی حالت کو نشانہ بنا سکتے ہیں یا کارکردگی کے مسائل یا نوڈ آپریٹرز کے ساتھ مسائل کی وجہ سے یہ پیچھے پڑنا شروع کر سکتا ہے. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). 
Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -اس سے آپ کو `chainHeadBlock` ملے گا جس کا موازنہ آپ اپنے سب گراف پر موجود `latestBlock` سے کر سکتے ہیں تاکہ یہ چیک کیا جا سکے کہ آیا یہ پیچھے چل رہا ہے۔ `synced` مطلع کرتا ہے کہ کیا سب گراف کبھی زنجیر تک پہنچ گیا ہے۔ `health` فی الحال `healthy` کی اقدار لے سکتا ہے اگر کوئی خرابی نہیں ہوئی، یا اگر کوئی خرابی تھی جس نے سب گراف کی پیشرفت کو روکا تو `failed`۔ اس صورت میں، آپ اس خرابی کی تفصیلات کے لیے `fatalError` فیلڈ کو چیک کر سکتے ہیں. - -## ہوسٹڈ سروس سب گراف آرکائیو پالیسی - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## سب گراف سٹوڈیو سب گراف آرکائیو پالیسی - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -اس پالیسی سے متاثر ہونے والے ہر سب گراف کے پاس زیر بحث ورژن کو واپس لانے کا اختیار ہے. diff --git a/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 9d2d22703352..000000000000 --- a/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: سب گراف کو سب گراف سٹوڈیو پر تعینات کرنا ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- گراف CLI انسٹال کریں (یارن یا این پی ایم کے ساتھ) -- اپنے سب گراف سب گراف سٹوڈیو میں بنائیں -- CLI سے اپنے اکاؤنٹ کی تصدیق کریں -- سب گراف کو سب گراف سٹوڈیو پر تعینات کرنا - -## گراف CLI انسٹال کرنا - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**yarn سے انسٹال کریں:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Npm سے انسٹال کریں:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## اپنے سب گراف سب گراف سٹوڈیو میں بنائیں - -اپنے اصل سب گراف کو تعینات کرنے سے پہلے آپ کو [سب گراف سٹوڈیو](https://thegraph.com/studio/) میں ایک سب گراف بنانا ہوگا۔ اس بارے میں مزید جاننے کے لیے ہمارا مشورہ ہے کہ آپ ہماری [سٹوڈیو دستاویزات](/deploying/subgraph-studio) پڑھیں. 
- -## اپنا سب گراف شروع کریں - -ایک بار جب آپ کا سب گراف سب گراف سٹوڈیو میں بن جاتا ہے تو آپ اس کمانڈ کا استعمال کرکے سب گراف کوڈ کو شروع کر سکتے ہیں: - -```bash -graph init --studio -``` - -`` قدر سب گراف سٹوڈیو میں آپ کے سب گراف کی تفصیلات کے صفحہ پر مل سکتی ہے: - -![سب گراف سٹوڈیو - Slug](/img/doc-subgraph-slug.png) - -`graph init` چلانے کے بعد، آپ سے کنٹریکٹ ایڈریس، نیٹ ورک، اور ABI کو ان پٹ کرنے کو کہا جائے گا جس سے آپ کیوری کرنا چاہتے ہیں۔ ایسا کرنے سے آپ کی مقامی مشین پر کچھ بنیادی کوڈ کے ساتھ ایک نیا فولڈر بن جائے گا جو آپ کے سب گراف پر کام کرنا شروع کر دے گا۔ اس کے بعد آپ اپنے سب گراف کو حتمی شکل دے سکتے ہیں تاکہ یہ یقینی بنایا جا سکے کہ یہ توقع کے مطابق کام کرتا ہے. - -## Graph Auth - -اپنا سب گراف سب گراف سٹوڈیو میں تعینات کرنے کے قابل ہونے سے پہلے، آپ کو CLI کے اندر اپنے اکاؤنٹ میں لاگ ان کرنے کی ضرورت ہے۔ ایسا کرنے کے لیے، آپ کو اپنی تعیناتی کلید کی ضرورت ہوگی جو آپ اپنے "میرے سب گراف" کے صفحہ یا اپنے سب گراف کی تفصیلات کے صفحہ پر تلاش کر سکتے ہیں. - -یہاں وہ کمانڈ ہے جو آپ کو CLI سے تصدیق کرنے کے لیے استعمال کرنے کی ضرورت ہے: - -```bash -graph auth --studio -``` - -## سب گراف کو سب گراف سٹوڈیو پر تعینات کرنا - -ایک بار جب آپ تیار ہو جائیں تو، آپ اپنا سب گراف سب گراف اسٹوڈیو میں تعینات کر سکتے ہیں۔ ایسا کرنے سے آپ کے سب گراف کو ڈیسینٹرالائزڈ نیٹ ورک پر شائع نہیں کیا جائے گا، یہ اسے صرف آپ کے اسٹوڈیو اکاؤنٹ میں تعینات کرے گا جہاں آپ اس کی جانچ کر سکیں گے اور میٹا ڈیٹا کو اپ ڈیٹ کر سکیں گے. - -یہاں CLI کمانڈ ہے جو آپ کو اپنے سب گراف کو تعینات کرنے کے لیے استعمال کرنے کی ضرورت ہے. - -```bash -graph deploy --studio -``` - -اس کمانڈ کو چلانے کے بعد، CLI ایک ورژن لیبل کے لیے کہے گا، آپ اسے جیسا چاہیں نام دے سکتے ہیں، آپ `0.1` اور `0.2` جیسے لیبل استعمال کرسکتے ہیں یا حروف بھی استعمال کرسکتے ہیں۔ جیسے `uniswap-v2-0.1`۔ وہ لیبلز گراف ایکسپلورر میں نظر آئیں گے اور کیوریٹرز یہ فیصلہ کرنے کے لیے استعمال کر سکتے ہیں کہ آیا وہ اس ورژن پر سگنل دینا چاہتے ہیں یا نہیں، لہذا ان کا انتخاب دانشمندی سے کریں. 
- -ایک بار تعیناتی کے بعد، آپ پلے گراؤنڈ کا استعمال کرتے ہوئے سب گراف سٹوڈیو میں اپنے سب گراف کی جانچ کر سکتے ہیں، ضرورت پڑنے پر دوسرا ورژن تعینات کر سکتے ہیں، میٹا ڈیٹا کو اپ ڈیٹ کر سکتے ہیں، اور جب آپ تیار ہوں، تو اپنا سب گراف گراف ایکسپلورر پر شائع کر سکتے ہیں. diff --git a/website/pages/ur/deploying/hosted-service.mdx b/website/pages/ur/deploying/hosted-service.mdx deleted file mode 100644 index a5c7f1ea9e33..000000000000 --- a/website/pages/ur/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: ہوسٹڈ سروس کیا ہے؟ ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -یہ سیکشن آپ کو [ہوسٹڈ سروس](https://thegraph.com/hosted-service/) میں سب گراف کی تعیناتی میں لے جائے گا۔ - -اگر آپ کے پاس ہوسٹڈ سروس پر کوئی اکاؤنٹ نہیں ہے، تو آپ اپنے گٹ ہب اکاؤنٹ کے ساتھ سائن اپ کر سکتے ہیں۔ ایک بار جب آپ تصدیق کر لیتے ہیں، تو آپ UI کے ذریعے سب گراف بنانا شروع کر سکتے ہیں اور انہیں اپنے ٹرمینل سے تعینات کر سکتے ہیں۔ ہوسٹڈ سروس متعدد نیٹ ورکس کو سپورٹ کرتی ہے، جیسے Polygon، Gnosis چین، BNB چین، Optimism، Arbitrum، اور بہت کچھ. - -ایک جامع فہرست کے لیے، [تعاون یافتہ نیٹ ورکس](/developing/supported-networks/#hosted-service) دیکھیں. - -## سب گراف بنائیں - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### ایک موجودہ کنٹریکٹ سے - -اگر آپ کے پاس پہلے سے ہی اپنی پسند کے نیٹ ورک پر ایک سمارٹ کنٹریکٹ تعینات ہے، تو اس کنٹریکٹ سے ایک نیا سب گراف بوٹسٹریپ کرنا ہوسٹڈ سروس شروع کرنے کا ایک اچھا طریقہ ہو سکتا ہے۔ - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. 
- -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -اس معاملے میں `` آپ کا گٹ ہب صارف یا تنظیم کا نام ہے، `` آپ کے سب گراف کا نام ہے، اور `` ڈائرکٹری کا اختیاری نام ہے جہاں `graph init` مثال سب گراف مینی فیسٹ ڈالے گا۔ `` آپ کے موجودہ کنٹریکٹس کا پتہ ہے۔ `` اس نیٹ ورک کا نام ہے جس پر کنٹریکٹ رہتا ہے۔ `` کنٹریکٹ ABI فائل کا مقامی راستہ ہے۔ **دونوں `--network` اور `--abi` اختیاری ہیں.** - -### ایک مثالی سب گراف سے - -دوسرا موڈ `graph init` سپورٹ کرتا ہے مثال کے سب گراف سے ایک نیا پروجیکٹ بنا رہا ہے۔ مندرجہ ذیل کمانڈ یہ کرتا ہے: - -``` -graph init --from-example --product hosted-service / [] -``` - -مثال کا سب گراف Dani گرانٹ کے گریوٹی کنٹریکٹ پر مبنی ہے جو صارف کے اوتاروں کا انتظام کرتا ہے اور `NewGravatar` یا `UpdateGravatar` ایونٹس کو خارج کرتا ہے جب بھی اوتار بنائے یا اپ ڈیٹ ہوتے ہیں۔ سب گراف گراف نوڈ اسٹور پر `Gravatar` اداروں کو لکھ کر اور اس بات کو یقینی بنا کر کہ یہ واقعات کے مطابق اپ ڈیٹ ہو کر ان ایونٹس کو سنبھالتا ہے۔ بہتر طور پر یہ سمجھنے کے لیے کہ آپ کے سمارٹ کنٹریکٹ میں سے کن ایونٹس، میپنگز وغیرہ پر توجہ دینا ہے، [سب گراف مینی فیسٹ](/developing/creating-a-subgraph#the-subgraph-manifest) پر جاری رکھیں. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. 
- -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## ہوسٹڈ سروس پر سپورٹڈ نیٹ ورکس - -آپ کو تعاون یافتہ نیٹ ورکس کی فہرست [یہاں](/developing/supported-networks) مل سکتی ہے. diff --git a/website/pages/ur/deploying/subgraph-studio.mdx b/website/pages/ur/deploying/subgraph-studio.mdx deleted file mode 100644 index b868b5bff940..000000000000 --- a/website/pages/ur/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -آپ کے نئے لانچ پیڈ میں خوش آمدید 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- سٹوڈیو UI کے ذریعے سے سب گراف بنائیں -- CLI کے استعمال سے سب گراف تعینات کریں -- سٹوڈیو UI سے سب گراف شائع کریں -- پلے گراؤنڈ میں اسے ٹیسٹ کریں -- کیوری URL کا استعمال کرتے ہوئے اسٹیجنگ میں ضم کریں -- مخصوص سب گرافس کے لۓ API کیز بنائیں اور ان کا انتظام کریں - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -سب گراف سے کیوری کرنے سے کیوری کی فیس بنتی ہے، جو گراف نیٹ ورک پر [انڈیکسرز](/network/indexing) کو انعام دینے کے لیے استعمال ہوتی ہے۔ اگر آپ ڈیپ ڈویلپر یا سب گراف ڈویلپر ہیں، تو اسٹوڈیو آپ کو اپنے یا آپ کی کمیونٹی کے کیوریز کو طاقت دینے کے لیے بہتر سب گراف بنانے کے لیے بااختیار بنائے گا۔ سٹوڈیو 5 اہم حصوں پر مشتمل ہے: - -- آپ کا صارف اکاؤنٹ کنٹرول کرتا ہے -- سب گرافس کی ایک فہرست جو آپ نے بنائی ہے -- انتظام کرنے، تفصیلات دیکھنے اور مخصوص سب گراف کی حیثیت کو دیکھنے کے لیے ایک سیکشن -- آپ کی API کیز کا نظم کرنے کے لیے ایک سیکشن جس میں آپ کو ایک سب گراف سے کیوری کرنے کی ضرورت ہوگی -- آپ کی بلنگ کا نظم کرنے کے لیے ایک سیکشن - -## اپنا اکاؤنٹ کیسے بنائیں - -1. 
Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. ایک بار جب آپ سائن ان کریں گے، آپ کو اپنے اکاؤنٹ کے ہوم پیج پر اپنی منفرد تعیناتی کلید نظر آئے گی۔ یہ آپ کو اپنے سب گراف شائع کرنے یا اپنی API کیز + بلنگ کا نظم کرنے کی اجازت دے گا۔ آپ کے پاس ایک منفرد تعیناتی کلید ہوگی جسے دوبارہ تیار کیا جا سکتا ہے اگر آپ کو لگتا ہے کہ اس سے سمجھوتہ کیا گیا ہے. - -## How to Create a Subgraph in Subgraph Studio - - - -## گراف نیٹ ورک کے ساتھ سب گراف مطابقت - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- [تعاون یافتہ نیٹ ورک](/developing/supported-networks) کو انڈیکس کریں -- درج ذیل خصوصیات میں سے کوئی بھی استعمال نہیں کرنا چاہیے: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -گراف نیٹ ورک میں بتدریج مزید خصوصیات اور نیٹ ورکس شامل کیے جائیں گے. - -### سب گراف لائف سائیکل کا بہاؤ - -![سب گراف لائف سائیکل](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## سب گراف سٹوڈیو میں اپنے سب گراف کی جانچ کرنا - -اگر آپ اپنے سب گراف کو نیٹ ورک پر شائع کرنے سے پہلے اس کی جانچ کرنا چاہتے ہیں، تو آپ یہ سب گراف **پلے گراؤنڈ** میں کر سکتے ہیں یا اپنے لاگز کو دیکھ سکتے ہیں۔ سب گراف لاگز آپ کو بتائیں گے کہ آپ کا سب گراف اس صورت میں **کہاں** ناکام ہو جاتا ہے. 
- -## اپنا سب گراف سب گراف سٹوڈیو میں شائع کریں - -آپ نے اسے یہاں تک پہنچا دیا ہے - مبارک ہو! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -ذیل میں ویڈیو کا جائزہ بھی دیکھیں: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -انڈیکسرز کو ایک مخصوص بلاک ہیش کے مطابق انڈیکسنگ کے ریکارڈ کا لازمی ثبوت جمع کرانے کی ضرورت ہے۔ چونکہ سب گراف کو شائع کرنا ایک آن چین عمل ہے، یاد رکھیں کہ ٹرانزیکشن کو گزرنے میں چند منٹ لگ سکتے ہیں۔ کنٹریکٹ شائع کرنے کے لیے آپ جو بھی پتہ استعمال کرتے ہیں وہی مستقبل کے ورژن شائع کرنے کے قابل ہوگا۔ سمجھداری سے انتخاب کرو! - -کیوریشن سگنل کے ساتھ سب گراف انڈیکسرز کو دکھائے جاتے ہیں تاکہ ان کو ڈیسینٹرالائزڈ نیٹ ورک پر انڈیکس کیا جا سکے۔ آپ ایک ٹرانزیکشن میں سب گراف اور سگنل شائع کر سکتے ہیں، جو آپ کو سب گراف پر پہلے کیوریشن سگنل کو ٹکسال کرنے کی اجازت دیتا ہے اور گیس کے اخراجات کو بچاتا ہے۔ بعد میں کیوریٹرز کے ذریعہ فراہم کردہ سگنل میں اپنے سگنل کو شامل کرنے سے، آپ کے سب گراف میں بالآخر سوالات کو پیش کرنے کا زیادہ امکان ہوگا. - -**اب جب کہ آپ نے اپنا سب گراف شائع کر دیا ہے، آئیے دیکھتے ہیں کہ آپ ان کا باقاعدہ انتظام کیسے کریں گے۔** نوٹ کریں کہ اگر آپ اپنا سب گراف نیٹ ورک پر شائع نہیں کر سکتے مطابقت پذیری میں ناکام یہ عام طور پر اس لیے ہوتا ہے کہ سب گراف میں بگس ہوتے ہیں - نوشتہ جات آپ کو بتائیں گے کہ وہ مسائل کہاں موجود ہیں! - -## اپنے سب گراف کو CLI کے ساتھ ورژن بنانا - -Developers might want to update their subgraph, for a variety of reasons. 
When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -براہ کرم نوٹ کریں کہ نیٹ ورک پر سب گراف کے نئے ورژن کی اشاعت سے وابستہ اخراجات ہیں۔ ٹرانزیکشن کی فیس کے علاوہ، ڈویلپرز کو خودکار منتقلی کے سگنل پر کیوریشن ٹیکس کے ایک حصے کو بھی فنڈ کرنا ہوگا۔ آپ اپنے سب گراف کا نیا ورژن شائع نہیں کر سکتے اگر کیوریٹرز نے اس پر اشارہ نہیں کیا ہے۔ کیوریشن کے خطرات کے بارے میں مزید معلومات کے لیے، براہ کرم مزید پڑھیں [یہاں](/network/curating). - -### سب گراف ورژن کی خودکار آرکائیونگ - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. 
- -![سب گراف سٹوڈیو - Unarchive](/img/Unarchive.png) diff --git a/website/pages/ur/developing/creating-a-subgraph.mdx b/website/pages/ur/developing/creating-a-subgraph.mdx deleted file mode 100644 index e110558b3d63..000000000000 --- a/website/pages/ur/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: سب گراف بنانا ---- - -ایک سب گراف بلاکچین سے ڈیٹا نکالتا ہے, اس پر کارروائی کرتا ہے اور اسے ذخیرہ کرتا ہے تاکہ GraphQL کے ذریعے آسانی سے کیوری کیا جا سکے. - -![سب گراف کی تعریف](/img/defining-a-subgraph.png) - -سب گراف کی تعریف چند فائلوں پر مشتمل ہے: - -- `subgraph.yaml`: سب گراف مینی فیسٹ پر مشتمل ایک YAML فائل ہے - -- `schema.graphql`: ایک GraphQL اسکیما جو اس بات کی وضاحت کرتا ہے کہ آپ کے سب گراف کے لیے کون سا ڈیٹا محفوظ ہے، اور GraphQL کے ذریعے اسے کیوری کیسے کیا جائے - -- `AssemblyScript Mappings`: [اسمبلی اسکرپٹ](https://github.com/AssemblyScript/assemblyscript) کوڈ جو ایونٹ کے ڈیٹا سے آپ کے اسکیما کی ہستیوں میں تبدیل کرتا ہے (جیسے `mapping.ts` اس ٹیوٹوریل میں) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## گراف CLI انسٹال کریں - -گراف CLI کو جاوا سکرپٹ میں لکھا کیا ہے, اور آپ کو اسے استعمال کرنے کے لیے یا تو `yarn` یا `npm` انسٹال کرنے کی ضرورت ہوگی; یہ فرض کیا جاتا ہے کہ آپ کے پاس مندرجہ ذیل میں سے yarn ہے. 
- -ایک بار جب آپ کے پاس `yarn` آجائے تو، یہ چلا کر Graph CLI انسٹال کریں - -**yarn کے ساتھ انسٹال کریں:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**npm کے ساتھ انسٹال کریں:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## ایک موجودہ کنٹریکٹ سے - -مندرجہ ذیل کمانڈ ایک سب گراف بناتا ہے جو موجودہ کنٹریکٹ کے تمام ایوینٹس کو انڈیکس کرتا ہے. یہ ایتھر سکین سے کنٹریکٹ ABI حاصل کرنے کی کوشش کرتا ہے اور مقامی فائل پاتھ کی درخواست کرنے پر واپس آتا ہے. اگر اختیاری انتخابات میں سے کوئی غائب ہے، تو یہ آپ کو ایک انٹرایکٹو فارم پر لے جاتا ہے. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` سب گراف سٹوڈیو میں آپ کے سب گراف کی ID ہے, یہ آپ کے سب گراف کی تفصیلات کے صفحہ پر پائی جا سکتی ہے. - -## ایک مثال کے سب گراف سے - -دوسرا موڈ `graph init` سپورٹ کرتا ہے مثال کے سب گراف سے ایک نیا پروجیکٹ بنا رہا ہے. درج ذیل کمانڈ یہ کرتی ہے: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. 
- -## موجودہ سب گراف میں نئے ڈیٹا سورسز شامل کریں - -`v0.31.0` سے اب تک `graph add`, `graph-cli` کمانڈ کے ذریعے موجودہ سب گراف میں نئے ڈیٹا سورسز کو شامل کرنے کی حمایت کرتا ہے. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -`add` کمانڈ ایتھر سکین سے ABI لے آئے گی (جب تک کہ `--abi` آپشن کے ساتھ ABI کا پاتھ متعین نہ کیا جائے)، اور ایک نیا `dataSource` بنائے گا۔ اسی طرح جس طرح `graph init` کمانڈ ایک `dataSource` `--from-contract` سے تخلیق کرتی ہے، اس کے مطابق اسکیما اور میپنگس کو اپ ڈیٹ کرتی ہے. - -`--merge-entities` کا اپشن اس بات کی نشاندہی کرتا ہے کہ ڈیولپر کس طرح `entity` اور `event` نام کے تنازعات سے نمٹنا چاہے گا: - -- اگر `true`: نئے `data source` کو موجودہ `eventHandlers` اور `entities` کا استعمال کرنا چاہیے. -- اگر `false`: ایک نئی اینٹیٹی اور ایونٹ ہینڈلر کو `${dataSourceName}{EventName}` کے ساتھ بنایا جانا چاہیے. - -کنٹریکٹ `address` متعلقہ نیٹ ورک کے لیے `networks.json` پر لکھا جائے گا. - -> **نوٹ:** انٹرایکٹو cli کا استعمال کرتے وقت، `graph init` کو کامیابی سے چلانے کے بعد، آپ کو ایک نیا `dataSource` شامل کرنے کا کہا جائے گا. - -## سب گراف مینی فیسٹ - -سب گراف مینی فیسٹ `subgraph.yaml` آپ کے سب گراف کے انڈیکس کردہ سمارٹ کنٹریکٹ کی وضاحت کرتا ہے, ان کنٹریکٹس میں سے کن ایوینٹس پر توجہ دی جائے, اور ایونٹ کے ڈیٹا کو ان ہستیوں کے ساتھ میپ کرنے کا طریقہ جو گراف نوڈ ذخیرہ کرتا ہے اور کیوری کرنے کی اجازت دیتا ہےاور ایونٹ کے ڈیٹا کو ان ہستیوں کے ساتھ میپ کرنے کا طریقہ جو گراف نوڈ ذخیرہ کرتا ہے اور کیوری کرنے کی اجازت دیتا ہے. سب گراف مینی فیسٹ کے لیے مکمل تفصیلات [یہاں](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) مل سکتی ہیں. 
- -مثال کے سب گراف کے لیے، `subgraph.yaml` یہ ہے: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -مینی فیسٹ کے لیے اپ ڈیٹ کرنے کے لیے اہم اندراجات یہ ہیں: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: تمام استعمال شدہ [نمایاں](#experimental-features) ناموں کی فہرست. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: سمارٹ کنٹریکٹ کا ایڈریس جو سب گراف کا ذریعہ ہے, اور استعمال کرنے کے لیے سمارٹ کنٹریکٹ کا ABI. ایڈریس اختیاری ہے; اسے چھوڑنا تمام کنٹریکٹس سے مماثل ایونٹس کو انڈیکس کرنے کی اجازت دیتا ہے. - -- `dataSources.source.startBlock`: بلاک کا اختیاری نمبر جس سے ڈیٹا سورس انڈیکس کرنا شروع کرتا ہے. زیادہ تر معاملات میں، ہم اس بلاک کو استعمال کرنے کا مشورہ دیتے ہیں جس میں کنٹریکٹ بنایا گیا تھا. - -- `dataSources.source.endBlock`: بلاک کا اختیاری نمبر جس پر ڈیٹا سورس انڈیکس کرنا روکتا ہے، اس بلاک سمیت۔ کم از کم مخصوص ورژن درکار ہے: `0.0.9`۔ - -- `dataSources.context`: کلیدی ویلیو کے جوڑے جو سب گراف میپنگ میں استعمال کیے جاسکتے ہیں۔ مختلف قسم کے ڈیٹا کو سپورٹ کرتا ہے جیسے `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`، `List`، اور `BigInt`۔ ہر متغیر کو اپنی `type` اور `data` کی وضاحت کرنے کی ضرورت ہے۔ یہ سیاق و سباق کے متغیرات پھر میپنگ فائلوں میں قابل رسائی ہوتے ہیں، جو سب گراف کی ترقی کے لیے مزید قابل ترتیب اختیارات پیش کرتے ہیں۔ - -- `dataSources.mapping.entities`: وہ اینٹیٹیز جنہیں ڈیٹا سورس اسٹور کو لکھتا ہے۔ schema.graphql فائل میں ہر اینٹیٹی کے لیے اسکیما کی وضاحت کی گئی ہے. - -- `dataSources.mapping.abis`: سورس کنٹریکٹ کے لیے ایک یا زیادہ ABI فائلیں اور ساتھ ہی کسی دوسرے سمارٹ کنٹریکٹ کے لیے جن کے ساتھ آپ میپنگ کے اندر سے تعامل کرتے ہیں. - -- `dataSources.mapping.eventHandlers`: ان سمارٹ کنٹریکٹ ایونٹس کی فہرست دیتا ہے جن پر یہ سب گراف رد عمل ظاہر کرتا ہے اور میپنگ میں ہینڈلرز— مثال میں./src/mapping.ts — جو ان واقعات کو اسٹور میں موجود اینٹیٹیز میں تبدیل کرتے ہیں. - -- `dataSources.mapping.callHandlers`: ان سمارٹ کنٹریکٹ فنکشنز کی فہرست بناتا ہے جن پر یہ سب گراف رد عمل ظاہر کرتا ہے اور میپنگ میں ہینڈلرز جو ان پٹس اور آوٹ پٹس کو فنکشن کالز کو اسٹور میں موجود اینٹیٹیز میں تبدیل کرتے ہیں. - -- `dataSources.mapping.blockHandlers`: ان بلاکس کی فہرست بناتا ہے جن پر یہ سب گراف رد عمل ظاہر کرتا ہے اور میپنگ میں ہینڈلرز کو چلانے کے لیے جب ایک بلاک کو چین میں شامل کیا جاتا ہے. فلٹر کے بغیر، بلاک ہینڈلر ہر بلاک کو چلایا جائے گا. 
ایک اختیاری کال فلٹر ہینڈلر کو `kind: call` کے ساتھ ایک `filter` فیلڈ شامل کرکے فراہم کیا جاسکتا ہے. یہ صرف ہینڈلر کو چلائے گا اگر بلاک میں ڈیٹا سورس کنٹریکٹ پر کم از کم ایک کال ہو. - -ایک واحد سب گراف متعدد سمارٹ کنٹریکٹ سے ڈیٹا کو انڈیکس کر سکتا ہے. ہر کنٹریکٹ کے لیے ایک اندراج شامل کریں جس سے ڈیٹا کو `dataSources` ایرے میں انڈیکس کرنے کی ضرورت ہے. - -### Order of Triggering Handlers - -بلاک کے اندر ڈیٹا سورس کے لیے محرکات درج ذیل عمل کا استعمال کرتے ہوئے ترتیب دیے گئے ہیں: - -1. ایونٹ اور کال ٹریگرز کو پہلے بلاک کے اندر ٹرانزیکشن انڈیکس سے ترتیب دیا جاتا ہے. -2. ایک ہی ٹرانزیکشن کے اندر ایونٹ اور کال ٹرگرز کو روایت کا استعمال کرتے ہوئے ترتیب دیا جاتا ہے: پہلے ایونٹ ٹرگرز پھر کال ٹرگرز، ہر قسم اس ترتیب کا احترام کرتی ہے جس کی وضاحت مینی فیسٹ میں کی گئی ہے. -3. بلاک ٹریگرز ایونٹ اور کال ٹریگرز کے بعد چلائے جاتے ہیں، اس ترتیب میں جس کی وضاحت مینی فیسٹ میں کی گئی ہے. - -ترتیب دینے کے یہ اصول تبدیل کیے جا سکتے ہیں. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| ورزن | جاری کردہ نوٹس | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### ABIs حاصل کرنا - -ABI فائل (فائلیں) آپ کے کنٹریکٹ (کنٹریکٹس) سے مماثل ہونی چاہیں. ABI کی فائلیں حاصل کرنے کے چند طریقے ہیں: - -- اگر آپ اپنا پراجیکٹ خود بنا رہے ہیں، تو ممکنہ طور پر آپ کو اپنے حالیہ ABIs تک رسائی حاصل ہوگی. -- اگر آپ کسی عوامی پروجیکٹ کے لیے سب گراف بنا رہے ہیں، تو آپ اس پروجیکٹ کو اپنے کمپیوٹر پر ڈاؤن لوڈ کر سکتے ہیں اور [`truffle compile`](https://truffleframework.com/docs/truffle/overview) کا استعمال کر کے ABI حاصل کر سکتے ہیں یا کمپائل کرنے کے لیے solc کا استعمال کریں. -- آپ ABI کو [ایتھر سکین](https://etherscan.io/) پر بھی تلاش کر سکتے ہیں، لیکن یہ ہمیشہ قابل بھروسہ نہیں ہوتا، کیونکہ وہاں اپ لوڈ کیا گیا ABI پرانا ہو سکتا ہے. یقینی بنائیں کہ آپ کے پاس صحیح ABI ہے، بصورت دیگر آپ کا سب گراف چلانا ناکام ہو جائے گا. - -## GraphQL سکیما - -آپ کے سب گراف کا اسکیما فائل `schema.graphql` میں ہے. GraphQL اسکیموں کی تعریف GraphQL انٹرفیس ڈیفینیشن لینگویج کا استعمال کرتے ہوئے کی جاتی ہے. اگر آپ نے کبھی بھی GraphQL سکیما نہیں لکھا ہے، تو یہ تجویز کی جاتی ہے کہ آپ GraphQL ٹائپ سسٹم پر اس پرائمر کو چیک کریں. GraphQL اسکیموں کے لیے حوالہ دستاویزات [GraphQL API](/querying/graphql-api) سیکشن میں مل سکتے ہیں. - -## ہستیوں کی تعریف کرنا - -ہستیوں کی وضاحت کرنے سے پہلے، ایک قدم پیچھے ہٹنا اور اس بارے میں سوچنا ضروری ہے کہ آپ کا ڈیٹا کس طرح تشکیل اور لنک کیا جاتا ہے. 
تمام کیوریز سب گراف اسکیما میں بیان کردہ ڈیٹا ماڈل اور سب گراف کے ذریعہ ترتیب کردہ ہستیوں کے متضاد بنائی جائیں گے. اس کی وجہ سے، یہ اچھا ہے کہ سب گراف اسکیما کو اس طرح سے بیان کیا جائے جو آپ کے ڈیپ کی ضروریات کے مطابق ہو. ہستیوں کو ایوینٹس یا فنکشنز کے بجائے "ڈیٹا پر مشتمل اشیاء" کے طور پر تصور کرنا مفید ہو سکتا ہے. - -گراف کے ساتھ، آپ `schema.graphql` میں ہستی کی اقسام کی وضاحت کرتے ہیں، اور گراف نوڈ اس اینٹیٹی کی قسم کے واحد مثالوں اور مجموعوں کے بارے میں کیوری کرنے کے لیے اعلیٰ معیار کی فیلڈز تیار کرے گا. ہر ایک قسم جو ایک ہستی ہونی چاہیے اسے `@entity` ہدایت کے ساتھ بیان کرنا ضروری ہے. پہلے سے طے شدہ طور پر، ہستیاں متغیر ہوتی ہیں، یعنی میپنگ موجودہ اداروں کو لوڈ کر سکتے ہیں، ان میں ترمیم کر سکتے ہیں اور اس ہستی کا نیا ورژن محفوظ کر سکتے ہیں. تغیر پذیری ایک قیمت پر آتی ہے، اور ہستی کی اقسام کے لیے جن کے لیے یہ معلوم ہے کہ ان میں کبھی بھی ترمیم نہیں کی جائے گی، مثال کے طور پر، کیونکہ ان میں صرف چین سے نکالا گیا ڈیٹا ہوتا ہے، ان کو `@entity(immutable: true)` کے ساتھ ناقابل تغیر کے طور پر نشان زد کرنے کی تجویز کی جاتی ہے. میپنگز غیر تبدیل شدہ ہستیوں میں تبدیلیاں کر سکتی ہیں جب تک کہ یہ تبدیلیاں اسی بلاک میں ہوتی ہیں جس میں ہستی بنائی گئی تھی. ناقابل تغیر ہستیاں لکھنے اور کیوری کرنے میں بہت تیز ہوتی ہیں، اور اس لیے جب بھی ممکن ہو استعمال کی جانی چاہیے. - -### اچھی مثال - -نیچے دی گئی `Gravatar` اینٹیٹی کو Gravatar آبجیکٹ کے ارد گرد بنایا گیا ہے اور یہ اس بات کی ایک اچھی مثال ہے کہ کسی اینٹیٹی کی تعریف کیسے کی جا سکتی ہے. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### بری مثال - -ذیل میں دی گئی مثالی اندراجات `GravatarAccepted` اور `GravatarDeclined` ایونٹس کی بنیاد پر ہے. ایونٹس یا فنکشن کالز کو ہستیوں سے 1:1 کے تناسب سے نقشہ کرنے کی تجویز نہیں کی جاتی ہے. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### اختیاری اور مطلوبہ فیلڈز - -ہستی کی فیلڈز کو مطلوبہ یا اختیاری کے طور پر بیان کیا جا سکتا ہے. مطلوبہ فیلڈز کی نشاندہی سکیما میں `!` سے ہوتی ہے. اگر میپنگ میں مطلوبہ فیلڈ سیٹ نہیں کی گئی ہے, تو فیلڈ کو کیوری کرتے وقت آپ کو یہ رکاوٹ ملے گی: - -``` -غیر صفر فیلڈ 'name' کے لیے خالی ویلیو حل ہو گئی -``` - -ہر ہستی کے پاس ایک `id` فیلڈ ہونا ضروری ہے، جو کہ `Bytes!` یا `String!` کی قسم کا ہونا چاہیے. عام طور پر `Bytes!` استعمال کرنے کی کی تجویز جاتی ہے، جب تک کہ `id` میں انسانی پڑھنے کے قابل متن شامل نہ ہو، کیونکہ `Bytes!` id کے ساتھ ہستیوں کا لکھنا تیز تر ہوگا اور `String!` `id` کے ساتھ کیوری کرنا. `id` فیلڈ پرائمری کلید کے طور پر کام کرتی ہے، اور ایک ہی قسم کے تمام اینٹیٹیز میں منفرد ہونا ضروری ہے۔ تاریخی وجوہات کی بنا پر، قسم `ID!` کو بھی قبول کیا جاتا ہے اور یہ `String!` کا مترادف ہے. - -کچھ ہستی کی اقسام کے لیے `id` کو دو دیگر ہستیوں کی id سے بنایا جاتا ہے. جو کہ `concat` کا استعمال کرتے ہوئے ممکن ہے، مثال کے طور پر، `let id = left.id.concat(right.id)` تاکہ `left` اور `right` کی id سے id بنے. اسی طرح، کسی موجودہ ہستی اور کاؤنٹر کی id سے ایک id بنانے کے لیے `count`، `let id = left.id.concatI32(count)` استعمال کیا جا سکتا ہے. کنکیٹینیشن منفرد id's تیار کرنے کی ضمانت دیتا ہے جب تک کہ `left` کی لمبائی ایسی تمام ہستیوں کے لیے یکساں ہو، مثال کے طور پر، کیونکہ `left.id` ایک `Address ہے`. - -### بلٹ ان اسکیلر اقسام - -#### GraphQL حمایت یافتہ اسکیلرز - -ہم اپنے GraphQL API میں درج ذیل اسکیلرز کی حمایت کرتے ہیں: - -| قسم | تفصیل | -| --- | --- | -| `Bytes` | Byte array، ایک ہیکساڈیسیمل سٹرنگ کے طور پر پیش کیا جاتا ہے. عام طور پر Ethereum hashes اور ایڈریسیس کے لیے استعمال ہوتا ہے. | -| `String` | `string` ویلیوز کے لیے اسکیلر. خالی حروف تعاون یافتہ نہیں ہیں اور خود بخود ہٹا دیے جاتے ہیں. | -| `Boolean` | `Boolean` ویلیوز کے لیے اسکیلر. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | ایک 8-بائٹ دستخط شدہ عدد، جسے 64-بٹ دستخط شدہ عدد بھی کہا جاتا ہے، -9,223,372,036,854,775,808 سے لے کر 9,223,372,036,854,775,807 تک کی ویلیوز کو ذخیرہ کرسکتا ہے۔ ایتھیریم سے `i64` کی نمائندگی کرنے کے لیے اسے استعمال کرنے کو ترجیح دیں۔ | -| `BigInt` | بڑے integers۔ Ethereum کی `uint32`، `int64`، `uint64`، ..., `uint256` اقسام کے لیے استعمال کیا جاتا ہے. نوٹ: `uint32` کے نیچے ہر چیز، جیسے `int32`، `uint24` یا `int8` کو `i32` کے طور پر دکھایا گیا ہے. | -| `BigDecimal` | `BigDecimal` اعلی درستگی والے اعشاریہ ایک significand اور ایک exponent کے طور پر پیش کیا جاتے ہہیں. Exponent رینج −6143 سے +6144 تک ہے۔ 34 سگنیفیکینڈ ہندسوں پر rounded کیا گیا۔. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -آپ اسکیما کے اندر enums بھی بنا سکتے ہیں. Enums میں درج ذیل نحو ہے: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -ایک بار اسکیما میں enum کی وضاحت ہوجانے کے بعد، آپ enum ویلیو کی سٹرنگ نمائندگی کو کسی اینٹیٹی پر enum فیلڈ سیٹ کرنے کے لیے استعمال کرسکتے ہیں۔ مثال کے طور پر، آپ پہلے اپنی اینٹیٹی کی وضاحت کرکے اور بعد میں `entity.tokenStatus = "SecondOwner"` کے ساتھ فیلڈ کو ترتیب دے کر `tokenStatus` کو `SecondOwner` پر سیٹ کر سکتے ہیں۔ ذیل کی مثال یہ ظاہر کرتی ہے کہ ٹوکن اینٹیٹی enum فیلڈ کے ساتھ کیسی نظر آئے گی: - -Enums لکھنے کے بارے میں مزید تفصیل [GraphQL دستاویزات](https://graphql.org/learn/schema/) میں مل سکتی ہے. - -#### ہستی کے تعلقات - -آپ کے اسکیما میں ایک ہستی کا ایک یا زیادہ دیگر ہستیوں سے تعلق ہو سکتا ہے. یہ تعلق آپ کے کیوریز میں شامل ہو سکتے ہیں۔ گراف میں تعلق یک طرفہ ہوتے ہیں۔ تعلق کے کسی بھی "اختتام" پر یک طرفہ تعلق کی وضاحت کرکے دو طرفہ تعلق کی تقلید کرنا ممکن ہے. - -تعلقات کی تعریف کسی دوسری فیلڈ کی طرح ہستیوں پر کی جاتی ہے سوائے اس کے کہ مخصوص کردہ قسم کسی اور ہستی کی ہو. 
- -#### ون-ٹو-ون تعلقات - -`TransactionReceipt` ہستی کی قسم کے ساتھ اختیاری one-to-one تعلق کے ساتھ `Transaction` ہستی کی قسم کی وضاحت کریں: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### ون-ٹو-مینی تعلقات - -ایک `TokenBalance` ہستی کی قسم کی وضاحت کریں جس میں ایک ٹوکن ہستی کی قسم کے ساتھ مطلوبہ ون-ٹو-مینی تعلق ہے: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### ریورس لک اپس - -ریورس لک اپس کی تعریف `@derivedFrom` فیلڈ کے ذریعے کسی ہستی پر کی جا سکتی ہے۔ یہ ہستی پر ایک ورچوئل فیلڈ بناتا ہے جس سے کیوری کیا جا سکتا ہے لیکن میپنگ API کے ذریعے دستی طور پر سیٹ نہیں کیا جا سکتا. بلکہ، یہ دوسری ہستی پر بیان کردہ تعلق سے اخذ کیا گیا ہے۔ ایسے تعلقات کے لیے، تعلقات کے دونوں اطراف کو ذخیرہ کرنا شاذ و نادر ہی سمجھ میں آتا ہے، اور انڈیکسنگ اور کیوری دونوں کی کارکردگی اس وقت بہتر ہوگی جب صرف ایک طرف ذخیرہ کیا جائے اور دوسرا اخذ کیا جائے. - -ون-ٹو-مینی تعلقات کے لیے، تعلق کو ہمیشہ 'ون' سائیڈ پر رکھنا چاہیے، اور 'مینی' سائیڈ کو ہمیشہ اخذ کیا جانا چاہیے۔ 'مینی' سائیڈ پر ہستیوں کی ایک ایرے کو ذخیرہ کرنے کے بجائے اس طرح سے تعلق کو ذخیرہ کرنے کے نتیجے میں سب گراف کی انڈیکسنگ اور کیوریز دونوں کے لیے نمایاں طور پر بہتر کارکردگی ہوگی۔ عام طور پر، ہستیوں کی ایریز کو ذخیرہ کرنے سے اتنا ہی گریز کیا جانا چاہیے جتنا کہ عملی ہو. - -#### مثال - -ہم ایک `tokenBalances` فیلڈ حاصل کرکے ٹوکن سے قابل رسائی ٹوکن کے لیے بیلنس بنا سکتے ہیں: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### ون-ٹو-مینی تعلقات - -مینی-ٹو-مینی تعلقات کے لیے، جیسے کہ صارفین جن میں سے ہر ایک کا تعلق کسی بھی تعداد میں تنظیموں سے ہو سکتا ہے، سب سے سیدھا، لیکن عام طور پر سب سے زیادہ پرفارمنس نہیں، تعلق کو ماڈل کرنے کا طریقہ شامل دونوں ہستیوں میں سے ہر ایک میں ایک ایرے کے طور پر ہے۔ اگر تعلق ہم آہنگ ہے تو، رشتے کے صرف ایک رخ کو ذخیرہ کرنے کی ضرورت ہے اور دوسری طرف اخذ کیا جا سکتا ہے. - -#### مثال - -ایک `User` ہستی کی قسم سے `Organization` ہستی کی قسم تک ریورس لک اپ کی وضاحت کریں۔ ذیل کی مثال میں، یہ `Organization` اینٹیٹی کے اندر سے `members` انتساب کو تلاش کرکے حاصل کیا جاتا ہے۔ کیوریز میں، `User` پر موجود `organizations` فیلڈ کو ان تمام `Organization` ہستیوں کو تلاش کرکے حل کیا جائے گا جن میں صارف کی ID شامل ہے. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -اس relationship کو ذخیرہ کرنے کا ایک زیادہ پرفارمنس والا طریقہ mapping table کے ذریعے ہے جس میں ہر ایک `User` / `Organization` کے جوڑے کے لیے ایک اندراج ہے جیسے اسکیما کے ساتھ - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -اس نقطہ نظر کا تقاضا ہے کہ کیوریز کو بازیافت کرنے کے لیے ایک اضافی سطح پر اتریں، مثال کے طور پر، صارفین کے لیے تنظیمیں: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -Many-to-many تعلقات کو ذخیرہ کرنے کے اس زیادہ وسیع طریقے کے نتیجے میں سب گراف کے لیے کم ڈیٹا ذخیرہ کیا جائے گا، اور اس لیے ایک سب گراف میں جو اکثر نمایاں طور پر انڈیکس اور کیوری کے لیے تیز تر ہوتا ہے. - -#### اسکیما میں کامینٹس شامل کرنا - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## فل ٹیکسٹ سرچ فیلڈز کی وضاحت کرنا - -ٹیکسٹ سرچ ان پٹ کی بنیاد پر فل ٹیکسٹ سرچ کیوریز ہستیوں کو فلٹر اور رینک کرتی ہیں. فل ٹیکسٹ کیوریز انڈیکس شدہ ٹیکسٹ ڈیٹا سے موازنہ کرنے سے پہلے کیوری کے متن کے ان پٹ کو سٹیمز میں پروسیس کرکے ملتے جلتے الفاظ کے میچز واپس کرنے کے قابل ہیں. - -فل ٹیکسٹ کیوری کی تعریف میں کیوری کا نام، ٹیکسٹ فیلڈز پر کارروائی کرنے کے لیے استعمال ہونے والی زبات کی لغت، نتائج کو ترتیب دینے کے لیے استعمال ہونے والا رینکنگ ایلگورتھم، اور تلاش میں شامل فیلڈز شامل ہیں. ہر فل ٹیکسٹ کیوری ایک سے زیادہ فیلڈز پر محیط ہو سکتا ہے، لیکن تمام شامل فیلڈز ایک ہی ہستی کی قسم سے ہونے چاہئیں. - -فل ٹیکسٹ کیوری شامل کرنے کے لیے، GraphQL اسکیما میں فل ٹیکسٹ ڈاریکٹو کے ساتھ `_Schema_` ٹائپ شامل کریں. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -مثال کی `bandSearch` فیلڈ کو `name`، `description` اور `bio` فیلڈز میں ٹیکسٹ دستاویزات کی بنیاد پر `Band` اینٹیٹیز کو فلٹر کرنے کے لیے کیوریز میں استعمال کیا جا سکتا ہے. فل ٹیکسٹ سرچ API کی تفصیل اور مزید مثال کے استعمال کے لیے [GraphQL API - کیوریز](/querying/graphql-api#queries) پر جائیں. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[فیچر مینجمنٹ](#experimental-features):** `specVersion` `0.0.4` سے اور اس کے بعد، `FullTextSearch` کا اعلان سب گراف مینی فیسٹ میں `خصوصیات` سیکشن کے تحت ہونا چاہیے. - -### تعاون یافتہ زبانیں ہیں - -ایک مختلف زبان کا انتخاب فل ٹیکسٹ کی تلاش کی API پر حتمی، اگرچہ بعض اوقات لطیف، اثر رکھتا ہے۔ فل ٹیکسٹ کیوری والے فیلڈز کا احاطہ منتخب کردہ زبان کے تناظر میں کیا جاتا ہے، اس لیے تجزیہ اور تلاش کی کیوریز کے ذریعہ تیار کردہ لیگزیمز زبان سے دوسری زبان میں مختلف ہوتے ہیں۔ مثال کے طور پر: جب تعاون یافتہ ترکی لغت کا استعمال کرتے ہوئے "ٹوکن" کو "ٹوک" کے لیے سٹیم کیا جاتا ہے، جب کہ، یقیناً، انگریزی لغت اسے "ٹوکن" پر سٹیم کرے گی. - -معاون زبانوں کی لغت: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | ڈینش | -| nl | ڈچ | -| en | انگریزی | -| fi | فنش | -| fr | فرینچ | -| de | جرمن | -| hu | ہنگری | -| it | اطالوی | -| no | ناروے | -| pt | پرتگالی | -| ro | رومانوی | -| ru | روسی | -| es | ہسپانوی | -| sv | سویڈش | -| tr | ترکش | - -### درجہ بندی الگورتھم - -نتائج ترتیب دینے کے لیے معاون الگورتھم: - -| الگورتھم | تفصیل | -| ------------ | --------------------------------------------------------------------------- | -| درجہ | نتائج ترتیب دینے کے لیے فل ٹیکسٹ کیوری کے میچ کوالٹی (1-0) کا استعمال کریں. | -| قربت کا درجہ | درجہ بندی کی طرح لیکن اس میں میچوں کی قربت بھی شامل ہے. 
| - -## میپنگ لکھنا - -میپنگز کسی خاص ذریعہ سے ڈیٹا لیتی ہے اور اسے ایسی ہستیوں میں تبدیل کرتی ہے جو آپ کے اسکیما میں بیان کی گئی ہیں۔ میپنگز کو [ ٹائپ سکرپٹ ](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) کے سب سیٹ میں لکھا جاتا ہے جسے [اسمبلی اسکرپٹ](https://github.com/AssemblyScript/assemblyscript/wiki) کہا جاتا ہے جسے WASM ([ویب اسمبلی](https://webassembly.org/)) میں مرتب کیا جاسکتا ہے۔ اسمبلی اسکرپٹ عام ٹائپ سکرپٹ سے زیادہ سخت ہے، پھر بھی ایک مانوس نحو فراہم کرتا ہے. - -ہر ایونٹ ہینڈلر کے لیے جس کی وضاحت `subgraph.yaml` میں `mapping.eventHandlers` کے نیچے کی گئی ہے، اسی نام کا ایک برآمد شدہ فنکشن بنائیں. ہر ہینڈلر کو `event` نامی ایک پیرامیٹر کو قبول کرنا چاہیے جو اس ایونٹ کے نام سے مطابقت رکھتا ہو جس کو ہینڈل کیا جا رہا ہے. - -مثال کے سب گراف میں، `src/mapping.ts` میں `NewGravatar` اور `UpdatedGravatar` ایونٹس کے ہینڈلرز شامل ہیں: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -پہلا ہینڈلر `NewGravatar` ایونٹ لیتا ہے اور `new Gravatar(event.params.id.toHex())` کے ساتھ ایک نئ `Gravatar` اینٹیٹی تخلیق کرتا ہے، متعلقہ ایونٹ کے پیرامیٹرز کا استعمال کرتے ہوئے ہستی کی فیلڈز کو پر کرتے ہیں. اس ہستی کی مثال کو متغیر `gravatar` کے ذریعے دکھایا گیا ہے، جس کی id قدر `event.params.id.toHex()` ہے. 
- -دوسرا ہینڈلر موجودہ `Gravatar` کو گراف نوڈ اسٹور سے لوڈ کرنے کی کوشش کرتا ہے. اگر یہ ابھی تک موجود نہیں ہے، تو اسے ڈیمانڈ پر بنایا جاتا ہے۔ اس کے بعد ہستی کو `gravatar.save()` کا استعمال کرتے ہوئے اسٹور میں واپس محفوظ کرنے سے پہلے نئے ایونٹ کے پیرامیٹرز سے مماثل ہونے کے لیے اپ ڈیٹ کیا جاتا ہے. - -### نئ ہستیوں بنانے کے لیے تجویز کردہ IDs - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## کوڈ تخلیق کرنا - -سمارٹ کنٹریکٹس، ایوینٹس اور ہستیوں کے ساتھ کام کرنا آسان اور ٹائپ محفوظ بنانے کے لیے، گراف CLI ڈیٹا کے ذرائع میں شامل سب گراف کے GraphQL اسکیما اور کنٹریکٹ ABIs سے اسمبلی سکرپٹ کی قسمیں تیار کر سکتا ہے. - -اس کے ساتھ کیا جاتا ہے - -```sh -graph codegen [--output-dir ] [] -``` - -لیکن زیادہ تر معاملات میں، سب گراف پہلے سے ہی `package.json` کے ذریعے پہلے سے تشکیل شدہ ہوتے ہیں تاکہ آپ اسے حاصل کرنے کے لیے درج ذیل میں سے ایک کو آسانی سے چلا سکیں: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -یہ `subgraph.yaml` میں مذکور ABI فائلوں میں ہر سمارٹ کنٹریکٹ کے لیے ایک اسمبلی سکرپٹ کلاس تیار کرے گا، جس سے آپ ان کنٹریکٹس کو میپنگ میں مخصوص ایڈریسز سے منسلک کر سکتے ہیں اور کاروائی والے بلاک کے خلاف صرف پڑھنے کے معاہدے کے طریقوں کو کال کر سکتے ہیں. یہ ہر کنٹریکٹ ایونٹ کے لیے ایک کلاس بھی تیار کرے گا تاکہ ایونٹ کے پیرامیٹرز تک آسانی سے رسائی فراہم کی جا سکے، نیز اس بلاک اور ٹرانزیکشن کو جس سے ایونٹ شروع ہوا ہے۔ یہ تمام قسمیں `//.ts` پر لکھی گئی ہیں۔ مثال کے سب گراف میں، یہ `generated/Gravity/Gravity.ts` ہوگا، جس سے میپنگ کو ان اقسام کو درآمد کرنے کی اجازت ملتی ہے. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -اس کے علاوہ، سب گراف کے GraphQL اسکیما میں ہر ایک قسم کے لیے ایک کلاس تیار کی جاتی ہے۔ یہ کلاسیں type-safe اینٹیٹی کی لوڈنگ، اینٹیٹی کے فیلڈز تک پڑھنے اور لکھنے تک رسائی فراہم کرتی ہیں اور ساتھ ہی ایک `save()` طریقہ فراہم کرتی ہیں تاکہ ہستیوں کو اسٹور کرنے کے لیے لکھیں۔ تمام اینٹیٹی کلاسز کو `/schema.ts` پر لکھا گیا ہے، جس سے میپنگ کو ان کے ساتھ درآمد کرنے کی اجازت ملتی ہے - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **نوٹ:** GraphQL اسکیما یا مینی فیسٹ میں شامل ABIs میں ہر تبدیلی کے بعد کوڈ جنریشن کو دوبارہ انجام دیا جانا چاہیے۔ سب گراف کی تعمیر یا تعیناتی سے پہلے اسے کم از کم ایک بار انجام دینا بھی ضروری ہے. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## ڈیٹا سورس ٹیمپلیٹس - -EVM سے مطابقت رکھنے والے سمارٹ کنٹریکٹس میں ایک عام نمونہ رجسٹری یا فیکٹری کنٹریکٹس کا استعمال ہے، جہاں ایک کنٹریکٹ دوسرے کنٹریکٹس کی صوابدیدی تعداد کو تخلیق کرتا ہے، ان کا انتظام کرتا ہے یا حوالہ دیتا ہے جن میں سے ہر ایک کی اپنی ریاست اور واقعات ہوتے ہیں. - -ان ذیلی کنٹریکٹس کے پتے پہلے سے معلوم ہو سکتے ہیں یا نہیں اور ان میں سے بہت سے کنٹریکٹس وقت کے ساتھ بنائے اور/یا شامل کیے جا سکتے ہیں۔ یہی وجہ ہے کہ، ایسی صورتوں میں، ایک واحد ڈیٹا سورس یا ڈیٹا کے ذرائع کی ایک مقررہ تعداد کی وضاحت کرنا ناممکن ہے اور مزید متحرک نقطہ نظر کی ضرورت ہے: _ڈیٹا سورس ٹیمپلیٹس_. - -### مرکزی کنٹریکٹ کے لیے ڈیٹا سورس - -سب سے پہلے، آپ مرکزی کنٹریکٹ کے لیے باقاعدہ ڈیٹا سورس کی وضاحت کرتے ہیں۔ ذیل کا ٹکڑا [یونی سویپ](https://uniswap.org) ایکسچینج فیکٹری کنٹریکٹ کے لیے ایک آسان مثال ڈیٹا سورس دکھاتا ہے. `NewExchange(address,address)` ایونٹ ہینڈلر کو نوٹ کریں. یہ اس وقت خارج ہوتا ہے جب فیکٹری کنٹریکٹ کے ذریعے ایک نیا ایکسچینج کنٹریکٹ بنایا جاتا ہے. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### متحرک طور پر بنائے گئے کنٹریکٹس کے لیے ڈیٹا سورس ٹیمپلیٹس - -پھر، آپ مینی فیسٹ میں _ڈیٹا سورس ٹیمپلیٹس_ شامل کرتے ہیں. یہ ڈیٹا کے باقاعدہ ذرائع سے مماثل ہیں، سوائے اس کے کہ ان میں `source` کے تحت پہلے سے طے شدہ کنٹریکٹ ایڈریس نہیں ہے. عام طور پر، آپ پیرنٹ کنٹریکٹ کے زیر انتظام یا حوالہ کردہ ہر قسم کے ذیلی کنٹریکٹ کے لیے ایک ٹیمپلیٹ کی وضاحت کریں گے. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### ڈیٹا سورس ٹیمپلیٹ کو شروع کرنا - -آخری مرحلے میں، آپ ٹیمپلیٹس میں سے کسی ایک سے ڈائنامک ڈیٹا سورس مثال بنانے کے لیے اپنے مرکزی کنٹریکٹس کی میپنگ کو اپ ڈیٹ کرتے ہیں. اس مثال میں، آپ `Exchange` ٹیمپلیٹ کو درآمد کرنے کے لیے مین کنٹریکٹ میپنگ کو تبدیل کریں گے اور اس پر `Exchange.create(address)` طریقہ کو کال کریں گے تاکہ نئے ایکسچینج کنٹریکٹ کو انڈیکس کرنا شروع کریں. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **نوٹ:** ایک نیا ڈیٹا سورس صرف اس بلاک کے لیے کالز اور ایونٹس پر کارروائی کرے گا جس میں اسے بنایا گیا تھا اور تمام مندرجہ ذیل بلاکس، لیکن تاریخی ڈیٹا، یعنی ڈیٹا پر کارروائی نہیں کرے گا جو پہلے سے بلاکس میں موجود ہے. -> -> اگر پہلے والے بلاکس میں نئے ڈیٹا سورس سے متعلقہ ڈیٹا ہوتا ہے، تو یہ بہترین ہے کہ کنٹریکٹ کی موجودہ حالت کو پڑھ کر اور ڈیٹا کا نیا سورس بننے کے وقت اس سٹیٹ کی نمائندگی کرنے والی اینٹیٹیز بنا کر اس ڈیٹا کو انڈیکس کریں. 
- -### ڈیٹا سورس سیاق و سباق - -ڈیٹا سورس سیاق و سباق ایک ٹیمپلیٹ کا انسٹینس بناتے وقت اضافی کنفیگریشن پاس کرنے کی اجازت دیتے ہیں. ہماری مثال میں، ہم کہتے ہیں کہ ایکسچینج ایک خاص تجارتی جوڑے سے وابستہ ہیں، جو کہ `NewExchange` ایونٹ میں شامل ہے. اس معلومات کو فوری ڈیٹا سورس میں منتقل کیا جا سکتا ہے، جیسے: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -`Exchange` ٹیمپلیٹ کی میپنگ کے اندر، اس کے بعد سیاق و سباق تک رسائی حاصل کی جا سکتی ہے: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -تمام ویلیو کی اقسام کے لیے setters اور getters ہوتے ہیں جیسے `setString` اور `getString`. - -## بلاکس شروع کریں - -`startBlock` ایک اختیاری سیٹنگ ہے جو آپ کو اس بات کی وضاحت کرنے کی اجازت دیتی ہے کہ چین میں کس بلاک سے ڈیٹا سورس انڈیکس کرنا شروع ہوگا۔ سٹارٹ بلاک سیٹ کرنا ڈیٹا سورس کو ممکنہ طور پر لاکھوں بلاکس کو چھوڑنے کی اجازت دیتا ہے جو غیر متعلقہ ہیں۔ عام طور پر، ایک سب گراف ڈیولپر `startBlock` کو اس بلاک پر سیٹ کرے گا جس میں ڈیٹا سورس کا سمارٹ کنٹریکٹ بنایا گیا تھا. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **نوٹ:** کنٹریکٹ تخلیق والے بلاک کو ایتھر سکین پر تیزی سے دیکھا جا سکتا ہے: -> -> 1. سرچ بار میں اس کا ایڈریس درج کرکے کنٹریکٹ کو تلاش کریں. -> 2. 
`Contract Creator` سیکشن میں تخلیق ٹرانزیکشن ہیش پر کلک کریں. -> 3. ٹرانزیکشن کی تفصیلات کا صفحہ لوڈ کریں جہاں آپ کو اس کنٹریکٹ کے لیے اسٹارٹ بلاک ملے گا. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
- -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## کال ہینڈلرز - -جب کہ ایوینٹس کنٹریکٹ کی سٹیٹ میں متعلقہ تبدیلیاں جمع کرنے کا ایک مؤثر طریقہ فراہم کرتے ہیں، بہت سے کنٹریکٹ گیس کے اخراجات کو بہتر بنانے کے لیے لاگ بنانے سے گریز کرتے ہیں. ان صورتوں میں، ایک سب گراف ڈیٹا سورس کنٹریکٹ پر کی گئی کالوں کو سبسکرائب کر سکتا ہے. یہ فنکشن کے سگنیچر اور میپنگ ہینڈلر کا حوالہ دینے والے کال ہینڈلرز کو بیان کرکے حاصل کیا جاتا ہے جو اس فنکشن میں کالوں پر کارروائی کرے گا. ان کالوں پر کارروائی کرنے کے لیے، میپنگ ہینڈلر کو کال کے ٹائپ کردہ ان پٹس اور آؤٹ پٹس کے ساتھ آرگومینٹ کے طور پر `ethereum.Call` موصول ہوگا. ٹرانزیکشن کی کال چین میں کسی بھی گہرائی میں کی گئی کالز میپنگ کو متحرک کریں گی، جس سے پراکسی کنٹریکٹ کے ذریعے ڈیٹا سورس کنٹریکٹ کے ساتھ سرگرمی کیپچر کی جا سکے گی. - -کال ہینڈلرز صرف دو صورتوں میں سے ایک میں ٹرگر کریں گے: جب مخصوص کردہ فنکشن کو کنٹریکٹ کے علاوہ کسی دوسرے اکاؤنٹ سے کال جاتا ہے یا جب اسے سولیڈیٹی میں بیرونی کے طور پر نشان زد کیا جاتا ہے اور اسی کنٹریکٹ میں کسی دوسرے فنکشن کے حصے کے طور پر کال کیا جاتا ہے. - -> **نوٹ:** کال ہینڈلرز فی الحال پیریٹی ٹریسنگ API پر منحصر ہیں. کچھ نیٹ ورکس، جیسے BNB چین اور آربٹرمم، اس API کو سپورٹ نہیں کرتے ہیں۔ اگر ان نیٹ ورکس میں سے کسی ایک کو انڈیکس کرنے والے سب گراف میں ایک یا زیادہ کال ہینڈلرز ہوتے ہیں، تو یہ مطابقت پذیری شروع نہیں کرے گا. 
سب گراف ڈویلپرز کو اس کے بجائے ایونٹ ہینڈلرز کا استعمال کرنا چاہیے. یہ کال ہینڈلرز سے پرفارمنس میں کہیں زیادہ ہیں، اور ہر evm نیٹ ورک پر تعاون یافتہ ہیں. - -### کال ہینڈلر کی تعریف - -اپنے مینی فیسٹ میں کال ہینڈلر کی وضاحت کرنے کے لیے، ڈیٹا سورس کے نیچے صرف ایک `callHandlers` array شامل کریں جسے آپ سبسکرائب کرنا چاہتے ہیں. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function` کالز کو فلٹر کرنے کے لیے نارملائزڈ فنکشن سگنیچر ہے. `handler` پراپرٹی آپ کی میپنگ میں اس فنکشن کا نام ہے جسے آپ ڈیٹا سورس کنٹریکٹ میں ٹارگٹ فنکشن کو کال کرنے پر چلانا چاہیں گے. - -### میپنگ فنکشن - -ہر کال ہینڈلر ایک واحد پیرامیٹر لیتا ہے جس کی ایک قسم کال ہونے والے فنکشن کے نام سے ملتی ہے۔ مندرجہ بالا مثال کے سب گراف میں، میپنگ میں ایک ہینڈلر ہوتا ہے جب `createGravatar` فنکشن کو کال کیا جاتا ہے اور ایک `CreateGravatarCall` پیرامیٹر بطور آرگومینٹ حاصل کرتا ہے: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -`handleCreateGravatar` فنکشن ایک نیا `CreateGravatarCall` لیتا ہے جو `ethereum.Call` کا ذیلی کلاس ہے، جو `@graphprotocol/graph-ts` کے ذریعہ فراہم کیا گیا ہے، جس میں کال کے ٹائپ کردہ ان پٹس اور آؤٹ پٹس شامل ہیں۔ جب آپ `graph codegen` چلاتے ہیں تو `CreateGravatarCall` ٹائپ آپ کے لیے تیار کی جاتی ہے. 
- -## بلاک ہینڈلرز - -کنٹریکٹ ایونٹس یا فنکشن کالز کو سبسکرائب کرنے کے علاوہ، ایک سب گراف اپنے ڈیٹا کو اپ ڈیٹ کرنا چاہتا ہے جیسے جیسے چین میں نئے بلاکس شامل ہوتے ہیں. اس کو حاصل کرنے کے لیے ایک سب گراف ہر بلاک کے بعد یا پہلے سے طے شدہ فلٹر سے مماثل بلاکس کے بعد ایک فنکشن چلا سکتا ہے. - -### معاون فلٹرز - -#### کال فلٹر - -```yaml -filter: - kind: call -``` - -_متعین ہینڈلر کو ہر بلاک کے لیے ایک بار بلایا جائے گا جس میں کنٹریکٹ (ڈیٹا سورس) کی کال ہو گی جس کے تحت ہینڈلر کی تعریف کی گئی ہے._ - -> **نوٹ:** `call` فلٹر فی الحال پیریٹی ٹریسنگ API پر منحصر ہے. کچھ نیٹ ورکس، جیسے بی این بی چین اور آربٹرم، اس API کو سپورٹ نہیں کرتے ہیں۔ اگر ان نیٹ ورکس میں سے کسی ایک کو انڈیکس کرنے والے سب گراف میں `call` فلٹر کے ساتھ ایک یا زیادہ بلاک ہینڈلرز ہوتے ہیں، تو یہ مطابقت پذیری شروع نہیں کرے گا. - -بلاک ہینڈلر کے لیے فلٹر کی عدم موجودگی اس بات کو یقینی بنائے گی کہ ہینڈلر کو ہر بلاک کے لیے کال کیا جاتا ہے. ڈیٹا سورس میں ہر فلٹر کی قسم کے لیے صرف ایک بلاک ہینڈلر ہو سکتا ہے. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### پولنگ فلٹر - -> **`specVersion` کی ضرورت ہے >= 0.8.0** - -> **نوٹ:** پولنگ فلٹرز صرف ڈیٹا سورس آف `kind: ethereum` پر دستیاب ہیں. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -متعین ہینڈلر کو ہر `n` بلاکس کے لیے ایک بار بلایا جائے گا، جہاں `n` `every` فیلڈ میں فراہم کردہ قدر ہے۔ یہ ترتیب سب گراف کو باقاعدہ بلاک وقفوں پر مخصوص آپریشن کرنے کی اجازت دیتی ہے. - -#### ونس فلٹر - -> **`specVersion` کی ضرورت ہے >= 0.8.0** - -> **نوٹ:** ایک بار جب فلٹرز صرف ڈیٹا سورس آف `kind: ethereum` پر دستیاب ہوتے ہیں. 
- -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -ایک بار فلٹر کے ساتھ متعین ہینڈلر کو دوسرے تمام ہینڈلرز کے چلنے سے پہلے صرف ایک بار کال کیا جائے گا۔ یہ کنفیگریشن سب گراف کو انڈیکسنگ کے آغاز میں مخصوص کاموں کو انجام دیتے ہوئے، ہینڈلر کو ابتدائیہ ہینڈلر کے طور پر استعمال کرنے کی اجازت دیتی ہے. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### میپنگ فنکشن - -میپنگ فنکشن کو اس کی واحد آرگومینٹ کے طور پر ایک `ethereum.Block` ملے گا۔ ایونٹس کے لیے میپنگ کے فنکشنز کی طرح، یہ فنکشن اسٹور میں موجود سب گراف ہستیوں تک رسائی حاصل کر سکتا ہے، سمارٹ کنٹریکٹس کو کال کر سکتا ہے اور ہستیوں کو تخلیق یا اپ ڈیٹ کر سکتا ہے. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## گمنام ایونٹس - -اگر آپ کو سولیڈیٹی میں گمنام ایوینٹس پر کارروائی کرنے کی ضرورت ہے، تو یہ ایونٹ کا عنوان 0 فراہم کرکے حاصل کیا جاسکتا ہے، جیسا کہ مثال میں: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -ایک ایونٹ صرف اس وقت شروع کیا جائے گا جب سگنیچر اور topic 0 دونوں مماثل ہوں۔ بطور ڈیفالٹ، `topic0` ایونٹ کے سگنیچر کے ہیش کے برابر ہے. - -## ایونٹ ہینڈلرز میں ٹرانزیکشن کی رسیدیں - -`specVersion` `0.0.5` اور `apiVersion` `0.0.7` سے شروع کرتے ہوئے، ایونٹ ہینڈلرز کو اس کی ٹرانزیکشن کی رسید تک رسائی حاصل ہوسکتی ہے جس نے ان کا اخراج کیا. - -ایسا کرنے کے لیے، ایونٹ ہینڈلرز کو سب گراف مینی فیسٹ میں نئی ​​`receipt: true` کلید کے ساتھ اقرار کیا جانا چاہیے، جو کہ اختیاری ہے اور ڈیفالٹ میں غلط ہے. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -ہینڈلر فنکشن کے اندر، `Event.receipt` فیلڈ میں رسید تک رسائی حاصل کی جا سکتی ہے. جب `receipt` کلید کو `false` پر سیٹ کیا جاتا ہے یا مینی فیسٹ میں چھوڑ دیا جاتا ہے، تو اس کی بجائے ایک `null` قدر لوٹائی جائے گی. - -## تجرباتی خصوصیات - -`specVersion` `0.0.4` سے شروع کرتے ہوئے، سب گراف کی خصوصیات کا واضح طور پر مینی فیسٹ فائل کے اوپری سطح پر `features` سیکشن میں ان کا استعمال کرتے ہوئے واضح طور پر اعلان کیا جانا چاہیے۔ `camelCase` نام، جیسا کہ نیچے دیے گئے ٹیبل میں درج ہے: - -| خصوصیت | نام | -| ------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text سرچ](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -مثال کے طور پر، اگر کوئی سب گراف **Full-Text Search** اور **Non-fatal Errors** خصوصیات کا استعمال کرتا ہے، تو مینی فیسٹ میں `features` فیلڈ ہونا چاہئے: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -نوٹ کریں کہ کسی خصوصیت کا اعلان کیے بغیر استعمال کرنے سے سب گراف کی تعیناتی کے دوران **validation error** پیش آئے گی، لیکن اگر کسی خصوصیت کا اعلان کیا جائے لیکن استعمال نہ کیا جائے تو کوئی خرابی نہیں ہوگی. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -پہلے سے مطابقت پذیر سب گرافس پر انڈیکسنگ کی غلطیاں، بذریعہ ڈیفالٹ، سب گراف کے ناکام ہونے اور مطابقت پذیری کو روکنے کا سبب بنیں گی. سب گراف کو متبادل طور پر غلطیوں کی موجودگی میں مطابقت پذیری جاری رکھنے کے لیے ترتیب دیا جا سکتا ہے، ہینڈلر کی طرف سے کی گئی تبدیلیوں کو نظر انداز کر کے جس سے خرابی پیدا ہوئی. اس سے سب گراف مصنفین کو اپنے سب گراف کو درست کرنے کا وقت ملتا ہے جب کہ تازہ ترین بلاک کے خلاف کیوریز پیش کی جاتی رہتی ہیں، حالانکہ اس خرابی کی وجہ سے نتائج متضاد ہو سکتے ہیں. نوٹ کریں کہ کچھ غلطیاں اب بھی ہمیشہ مہلک ہوتی ہیں. غیر مہلک ہونے کے لیے، خرابی کو تعییناتی معلوم ہونا چاہیے. - -> **نوٹ:** گراف نیٹ ورک ابھی تک غیر مہلک غلطیوں کو سپورٹ نہیں کرتا ہے، اور ڈویلپرز کو سٹوڈیو کے ذریعے نیٹ ورک پر اس فعالیت کا استعمال کرتے ہوئے سب گراف تعینات نہیں کرنا چاہیے. - -غیر مہلک غلطیوں کو فعال کرنے کے لیے سب گراف مینی فیسٹ پر درج ذیل خصوصیت کا فلیگ ترتیب دینے کی ضرورت ہے: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -کیوری کو `subgraphError` آرگومینٹ کے ذریعے ممکنہ تضادات کے ساتھ کیوری کرنے والے ڈیٹا کے لیے بھی آپٹ ان کرنا چاہیے. `_meta` سے کیوری کرنے کی بھی تجویز کی جاتی ہے تاکہ یہ چیک کیا جا سکے کہ آیا سب گراف نے غلطیوں کو نظر انداز کر دیا ہے، جیسا کہ مثال میں: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -اگر سب گراف میں کسی غلطی کا سامنا ہوتا ہے، تو وہ کیوری ڈیٹا اور graphql کی غلطی دونوں کو پیغام `"indexing_error"` کے ساتھ لوٹائے گا، جیسا کہ اس مثال کے جواب میں: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### موجودہ سب گرافس پر گرافٹنگ - -> **نوٹ:** ابتدائی طور پر گراف نیٹ ورک میں اپ گریڈ کرتے وقت گرافٹنگ استعمال کرنے کی سفارش نہیں کی جاتی ہے۔ مزید جانیں [یہاں](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -جب سب گراف کو پہلی بار تعینات کیا جاتا ہے، تو یہ متعلقہ چین کے جینیسس بلاک (یا ہر ڈیٹا سورس کے ساتھ بیان کردہ `startBlock` پر) ایوینٹس کو انڈیکس کرنا شروع کرتا ہے. کچھ حالات میں; موجودہ سب گراف سے ڈیٹا کو دوبارہ استعمال کرنا اور بعد کے بلاک میں انڈیکس کرنا شروع کرنا فائدہ مند ہے۔ انڈیکسنگ کے اس موڈ کو _گرافٹنگ_ کہا جاتا ہے. گرافٹنگ, مثال کے طور پر، ڈیویلاپمنٹ کے دوران میپنگ میں ماضی کی سادہ غلطیوں کو تیزی سے حاصل کرنے کے لیے یا موجودہ سب گراف کے ناکام ہونے کے بعد اسے عارضی طور پر دوبارہ کام کرنے کے لیے مفید ہے. - -سب گراف کو بیس سب گراف پر اس وقت گرافٹ کیا جاتا ہے جب `subgraph.yaml` میں سب گراف مینی فیسٹ میں اوپر کی سطح پر `graft` بلاک ہوتا ہے: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -جب ایک سب گراف جس کے مینی فیسٹ میں `graft` بلاک تعینات کیا جاتا ہے، تو گراف نوڈ `base` سب گراف کے ڈیٹا کو کاپی کرے گا اور اس میں دیئے گئے `block` تک یا اس کو بھی شامل کرے گا۔ اور پھر اس بلاک سے نئے سب گراف کو انڈیکس کرنا جاری رکھے گا۔ بنیادی سب گراف ٹارگٹ گراف نوڈ انسٹینس پر موجود ہونا چاہیے اور کم از کم دیئے گئے بلاک تک انڈیکس ہونا چاہیے۔ اس پابندی کی وجہ سے، گرافٹنگ کا استعمال صرف ڈیویلیپمنٹ کے دوران یا کسی ہنگامی صورت حال کے دوران کیا جانا چاہیے تاکہ مساوی غیر گرافٹڈ شدہ سب گراف کی پیداوار کو تیز کیا جا سکے. - -چونکہ گرافٹنگ بیس ڈیٹا کو انڈیکس کرنے کے بجائے کاپی کرتا ہے، شروع سے انڈیکس کرنے کے مقابلے میں مطلوبہ بلاک میں سب گراف حاصل کرنا بہت تیز ہے، حالانکہ ابتدائی ڈیٹا کاپی بہت بڑے سب گراف کے لیے کئی گھنٹے لگ سکتی ہے۔ جب گرافٹ شدہ سب گراف کو شروع کیا جا رہا ہے، گراف نوڈ ان ہستی کی اقسام کے بارے میں معلومات کو لاگ کرے گا جو پہلے ہی کاپی ہو چکی ہیں. 
- -گرافٹڈ سب گراف ایک GraphQL اسکیما استعمال کرسکتا ہے جو بیس سب گراف میں سے ایک سے مماثل نہیں ہے، لیکن اس کے ساتھ محض مطابقت رکھتا ہے۔ یہ اپنے طور پر ایک درست سب گراف سکیما ہونا ضروری ہے، لیکن مندرجہ ذیل طریقوں سے بنیادی سب گراف کے سکیما سے انحراف کر سکتا ہے: - -- یہ ہستی کی اقسام کو ڈالتا یا ہٹاتا ہے -- یہ ہستی کی اقسام سے صفات کو ہٹاتا ہے -- یہ ہستی کی اقسام میں nullable صفات کا اضافہ کرتا ہے -- یہ non-nullable صفات کو nullable صفات میں بدل دیتا ہے -- یہ enums میں اقدار کا اضافہ کرتا ہے -- یہ انٹرفیس میں اضافہ کرتا یا ہٹاتا ہے -- یہ ان ہستی کی اقسام کے لیے تبدیل ہوتا ہے جن کے لیے ایک انٹرفیس لاگو کیا جاتا ہے - -> **[فیچر مینجمنٹ](#experimental-features):** `grafting` کو سب گراف مینی فیسٹ میں `features` کے تحت واضح کیا جانا چاہیے. - -## IPFS/Arweave File Data Sources - -فائل ڈیٹا کے ذرائع ایک مضبوط، قابل توسیع طریقے سے انڈیکسنگ کے دوران آف چین ڈیٹا تک رسائی کے لیے ایک نئی سب گراف کی فعالیت ہے۔ فائل ڈیٹا کے ذرائع IPFS اور Arweave سے فائلیں لانے میں معاونت کرتے ہیں. - -> یہ آف چین ڈیٹا کی تعییناتی انڈیکسنگ کے ساتھ ساتھ صوابدیدی HTTP سے حاصل کردہ ڈیٹا کے ممکنہ تعارف کی بنیاد بھی رکھتا ہے. - -### جائزہ - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> یہ موجودہ `ipfs.cat` API کی جگہ لے لیتا ہے - -### اپ گریڈ گائیڈ - -#### `graph-ts` اور `graph-cli` کو اپ ڈیٹ کریں - -فائل ڈیٹا سورسز کے لیے graph-ts >=0.29.0 اور graph-cli>=0.33.1 درکار ہے - -#### ایک نئے ادارے کی قسم شامل کریں جو فائلیں ملنے پر اپ ڈیٹ ہو جائے گی - -فائل ڈیٹا سورسز چین پر مبنی اداروں تک رسائی یا اپ ڈیٹ نہیں کر سکتے ہیں، لیکن فائل کے مخصوص اداروں کو اپ ڈیٹ کرنا ضروری ہے. 
- -اس کا مطلب یہ ہوسکتا ہے کہ موجودہ اداروں سے فیلڈز کو الگ الگ اداروں میں تقسیم کیا جائے، جو آپس میں جڑے ہوئے ہوں. - -اصل مشترکہ ادارہ: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -نیا، تقسیم شدہ ادارہ: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -اگر رشتہ پیرنٹ ادارے اور نتیجے میں آنے والی فائل ڈیٹا سورس ہستی کے درمیان 1:1 ہے، تو سادہ ترین نمونہ IPFS CID کو بطور لوک اپ استعمال کر کے پیرنٹ کی ہستی کو نتیجے میں آنے والی فائل ہستی سے جوڑنا ہے۔ اگر آپ کو اپنی نئی فائل پر مبنی ہستیوں کو ماڈل بنانے میں دشواری ہو رہی ہے تو ڈسکورڈ پر رابطہ کریں! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### `kind: file/ipfs` یا `kind: file/arweave` کے ساتھ ایک نیا ٹیمپلیٹڈ ڈیٹا ماخذ شامل کریں - -یہ ڈیٹا سورس ہے جو دلچسپی کی فائل کی شناخت ہونے پر پیدا کیا جائے گا. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> فی الحال `abis` کی ضرورت ہے، حالانکہ فائل ڈیٹا کے ذرائع کے اندر سے کنٹریکٹس کو کال کرنا ممکن نہیں ہے - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. 
- -#### فائلوں پر کارروائی کرنے کے لیے ایک نیا ہینڈلر بنائیں - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -پڑھنے کے قابل سٹرنگ کے طور پر فائل کی CID تک `dataSource` کے ذریعے اس طرح رسائی حاصل کی جا سکتی ہے: - -```typescript -const cid = dataSource.stringParam() -``` - -مثالی ہینڈلر: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### ضرورت پڑنے پر فائل ڈیٹا سورسز کو دریافت کریں - -اب آپ چین پر مبنی ہینڈلرز کے عمل کے دوران فائل ڈیٹا کے ذرائع بنا سکتے ہیں: - -- خود کار طریقے سے تیار کردہ `templates` سے ٹیمپلیٹ درآمد کریں -- میپنگ کے اندر سے `TemplateName.create(cid: string)` کو کال کریں، جہاں cid IPFS یا Arweave کے لیے مواد کا ایک درست شناخت کنندہ ہے - -IPFS کے لیے، گراف نوڈ [v0 اور v1 مواد کے شناخت کنندگان](https://docs.ipfs.tech/concepts/content-addressing/) کو سپورٹ کرتا ہے، اور ڈائرکٹریز کے ساتھ مواد کی شناخت کرنے والوں کو سپورٹ کرتا ہے (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
- -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -مثال: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -یہ ایک نیا فائل ڈیٹا سورس بنائے گا، جو گراف نوڈ کے کنفیگر کردہ آئی پی ایف ایس یا Arweave اینڈ پوائنٹ کو پول کرے گا، اگر یہ نہ ملا تو دوبارہ کوشش کریں۔ جب فائل مل جائے گی، فائل ڈیٹا سورس ہینڈلر کو عمل میں لایا جائے گا. - -یہ مثال CID کو پیرنٹ `Token` ہستی اور نتیجے میں `TokenMetadata` ہستی کے درمیان تلاش کے طور پر استعمال کر رہی ہے. 
- -> اس سے پہلے، یہ وہ مقام ہے جہاں ایک سب گراف ڈویلپر نے فائل لانے کے لیے `ipfs.cat(CID)` کو کیا تھا - -مبارک ہو، آپ فائل ڈیٹا سورسز استعمال کر رہے ہیں! - -#### آپ کے سب گراف کو تعینات کرنا - -اب آپ اپنے سب گراف کو کسی بھی گراف نوڈ >=v0.30.0-rc.0 پر `build` اور `deploy` کر سکتے ہیں. - -#### حدود - -فائل ڈیٹا سورس کے ہینڈلرز اور ہستیوں کو دیگر سب گراف ہستیوں سے الگ تھلگ کر دیا جاتا ہے، اس بات کو یقینی بناتے ہوئے کہ عمل درآمد کے وقت وہ تعیین پسند ہیں، اور اس بات کو یقینی بناتے ہیں کہ چین پر مبنی ڈیٹا سورسز کی کوئی آلودگی نہ ہو۔ مخصوص ہونا: - -- فائل ڈیٹا سورسز کے ذریعے تخلیق کردہ ادارے ناقابل تغیر ہیں، اور انہیں اپ ڈیٹ نہیں کیا جا سکتا -- فائل ڈیٹا کے ذرائع ہینڈلرز دوسرے فائل ڈیٹا سورسز سے اداروں تک رسائی حاصل نہیں کرسکتے ہیں -- فائل ڈیٹا کے ذرائع سے وابستہ ہستیوں تک چین پر مبنی ہینڈلرز تک رسائی حاصل نہیں کی جا سکتی ہے - -> اگرچہ یہ رکاوٹ زیادہ تر استعمال کے معاملات کے لیے مشکل نہیں ہونی چاہیے، لیکن یہ کچھ لوگوں کے لیے پیچیدگی پیدا کر سکتی ہے۔ براہ کرم ڈسکورڈ کے ذریعے رابطہ کریں اگر آپ کو اپنے فائل پر مبنی ڈیٹا کو سب گراف میں ماڈل کرنے میں مسئلہ درپیش ہے! - -مزید برآں، فائل ڈیٹا سورس سے ڈیٹا سورسز بنانا ممکن نہیں ہے، چاہے وہ آن چین ڈیٹا سورس ہو یا کوئی اور فائل ڈیٹا سورس۔ مستقبل میں یہ پابندی ختم ہو سکتی ہے. - -#### بہترین طریقے - -اگر آپ NFT میٹا ڈیٹا کو متعلقہ ٹوکنز سے جوڑ رہے ہیں، تو ٹوکن ہستی سے میٹا ڈیٹا ہستی کا حوالہ دینے کے لیے میٹا ڈیٹا کے IPFS ہیش کا استعمال کریں۔ آئی پی ایف ایس ہیش کو بطور ID استعمال کرتے ہوئے میٹا ڈیٹا ہستی کو محفوظ کریں. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -اگر آپ کے پاس ایسی ہستی ہیں جو کئی بار ریفریش ہوتے ہیں، تو IPFS ہیش کا استعمال کرتے ہوئے فائل پر مبنی منفرد ہستی بنائیں۔ entity ID، اور چین پر مبنی ہستی میں اخذ کردہ فیلڈ کا استعمال کرتے ہوئے ان کا حوالہ دیں. 
- -> ہم مندرجہ بالا سفارش کو بہتر بنانے کے لیے کام کر رہے ہیں، لہذا کیوریز صرف "حالیہ ترین" ورژن واپس کرتے ہیں - -#### معلوم مسائل - -فائل ڈیٹا سورسز کو فی الحال ABIs کی ضرورت ہے، حالانکہ ABIs استعمال کیے جانے والا ([مسئلہ](https://github.com/graphprotocol/graph-cli/issues/961)) نہیں۔ کام کا مقصد کسی بھی ABI کو شامل کرنا ہے. - -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### مثالیں - -[کرپٹو کوون سب گراف کی منتقلی](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### حوالہ جات - -[GIP فائل ڈیٹا سورسز](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ur/developing/creating-a-subgraph/_meta.js b/website/pages/ur/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/ur/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ur/developing/graph-ts/_meta.js b/website/pages/ur/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/ur/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ur/managing/deprecate-a-subgraph.mdx b/website/pages/ur/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/ur/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. 
- -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/ur/mips-faqs.mdx b/website/pages/ur/mips-faqs.mdx deleted file mode 100644 index e59e86551e55..000000000000 --- a/website/pages/ur/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs کے اکثر پوچھے گئے سوالات ---- - -## تعارف - -> نوٹ: MIPs پروگرام مئی 2023 سے بند ہے۔ حصہ لینے والے تمام انڈیکسرز کا شکریہ! - -گراف کا ایکو سسٹم میں حصہ لینے کا یہ ایک دلچسپ وقت ہے! [گراف ڈے 2022](https://thegraph.com/graph-day/2022/) کے دوران Yaniv Tal نے اعلان کیا کہ [ہوسٹڈ سروس کے غروب آفتاب](https://thegraph.com/blog/sunsetting-hosted-service/)ایک لمحہ جس کی گراف کا ایکو سسٹم کئی سالوں سے کام کر رہا ہے. - -ہوسٹڈ سروس کے غروب ہونے اور اس کی تمام سرگرمیوں کی ڈیسنٹرالا ئزڈ نیٹ ورک میں منتقلی کی حمایت کرنے کے لیے، گراف فاؤنڈیشن نے [مائیگریشن انفراسٹرکچر پرووائیڈرز (MIPs) پروگرام](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program) کا اعلان کیا ہے. - -MIPs پروگرام انڈیکسر کے لیے ایک ترغیب دینے والا پروگرام ہے جو انہیں ایتھیریم مین نیٹ سے آگے انڈیکس چینز کے لیے وسائل کے ساتھ مدد فراہم کرتا ہے اور گراف پروٹوکول کو ڈیسنٹرالا ئزڈ نیٹ ورک کو ایک ملٹی چین انفراسٹرکچر پرت میں پھیلانے میں مدد کرتا ہے. 
- -MIPs پروگرام نے GRT سپلائی (75M GRT) کا 0.75% مختص کیا ہے، 0.5% انڈیکسرز کو انعام دینے کے لیے جو نیٹ ورک کو بوٹسٹریپ کرنے میں حصہ ڈالتے ہیں اور 0.25% نیٹ ورک گرانٹس کے لیے مختص کیے گئے ہیں جو ملٹی چین سب گراف استعمال کرنے والے سب گراف ڈویلپرز کے لیے ہیں. - -### مفید وسائل - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [گراف نیٹ ورک پر ایک مؤثر انڈیکسر کیسے بنیں](https://thegraph.com/blog/how-to-become-indexer/) -- [انڈیکسر نالج ہب](https://thegraph.academy/indexers/) -- [آلوکیشن آپٹیمائزر](https://github.com/graphprotocol/allocationopt.jl) -- [آلوکیشن آپٹیمائزیشن ٹولنگ](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. کیا یہ ممکن ہے کہ انڈیکسنگ کا ایک درست ثبوت (POI) پیدا کیا جا سکے چاہے ایک سب گراف ناکام ہو گیا ہو؟ - -جی ہاں، یہ واقعی ہے. - -سیاق و سباق کے لیے، ثالثی چارٹر، [چارٹر کے بارے میں یہاں مزید جانیں](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract)، ناکام سب گراف کے لیے POI بنانے کے طریقہ کار کی وضاحت کرتا ہے. - -کمیونٹی کے ایک رکن، [SunTzu](https://github.com/suntzu93) نے ثالثی چارٹر کے طریقہ کار کے مطابق اس عمل کو خودکار بنانے کے لیے ایک اسکرپٹ بنایا ہے۔ ریپو چیک کریں [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. MIPs پروگرام سب سے پہلے کس چین کو ترغیب دے گا؟ - -پہلا چین جو ڈیسینٹرالائزڈ نیٹ ورک پر سپورٹ کیا جائے گا وہ ہے Gnosis چین! پہلے xDAI کے نام سے جانا جاتا تھا، Gnosis چین ایک EVM پر مبنی چین ہے۔ Gnosis Chain کو پہلے کے طور پر منتخب کیا گیا تھا کیونکہ اس کے چلانے والے نوڈس، انڈیکسر کی تیاری، گراف کے ساتھ سیدھ میں ہونا اور ویب 3 میں اپنانے کی صارف دوستی ہے. - -### 3. 
MIPs پروگرام میں نئی ​​چینز کیسے شامل کی جائیں گی؟ - -نئی چینز کا اعلان پورے MIPs پروگرام میں کیا جائے گا، جو انڈیکسنگ کی تیاری، طلب اور کمیونٹی کے جذبات پر مبنی ہے۔ چینز کو پہلے ٹیسٹ نیٹ پر سپورٹ کیا جائے گا اور اس کے بعد مین نیٹ پر اس چین کو سپورٹ کرنے کے لیے ایک GIP پاس کیا جائے گا۔ MIPs پروگرام میں حصہ لینے والے انڈیکسر اس بات کا انتخاب کریں گے کہ وہ کن چینز کو سپورٹ کرنے میں دلچسپی رکھتے ہیں اور سب گراف پیش کرنے کے لیے نیٹ ورک پر کیوری کی فیس اور انڈیکسنگ کے انعامات حاصل کرنے کے علاوہ فی چین انعامات حاصل کریں گے۔ MIPs کے شرکاء کو ان کی کارکردگی، نیٹ ورک کی ضروریات کو پورا کرنے کی صلاحیت، اور کمیونٹی سپورٹ کی بنیاد پر اسکور کیا جائے گا. - -### 4. ہمیں کیسے پتہ چلے گا کہ جب نیٹ ورک ایک نئی چین کے لیے تیار ہے؟ - -گراف فاؤنڈیشن تیاری کا بہترین اندازہ لگانے کے لیے QoS کارکردگی کے میٹرکس، نیٹ ورک کی کارکردگی اور کمیونٹی چینلز کی نگرانی کرے گی۔ ترجیح اس بات کو یقینی بنانا ہے کہ نیٹ ورک ان ملٹی چین ڈیپ کے لیے کارکردگی کی ضروریات کو پورا کرتا ہے تاکہ وہ اپنے سب گراف کو منتقل کر سکیں. - -### 5. فی چین انعامات کو کیسے تقسیم کیا جاتا ہے؟ - -یہ دیکھتے ہوئے کہ نوڈس کی مطابقت پذیری کے لیے چینز ان کی ضروریات میں مختلف ہوتی ہیں، اور وہ کیوری کے حجم اور اپنانے میں مختلف ہوتی ہیں، اس چینز کے چکر کے اختتام پر فی چین انعامات کا فیصلہ اس بات کو یقینی بنانے کے لیے کیا جائے گا کہ تمام تاثرات اور سیکھنے کو حاصل کیا جائے۔ تاہم، نیٹ ورک پر چین کے تعاون کے بعد ہر وقت انڈیکسرز کیوری کی فیس اور انڈیکسنگ کے انعامات بھی حاصل کر سکیں گے. - -### 6. کیا ہمیں MIPs پروگرام میں تمام چینز کو انڈیکس کرنے کی ضرورت ہے یا کیا ہم صرف ایک چین کو منتخب کر سکتے ہیں اور اسے انڈیکس کر سکتے ہیں؟ - -آپ جو بھی چین چاہیں انڈیکس کرنے میں آپ کا استقبال ہے! 
MIPs پروگرام کا ہدف انڈیکسرز کو ٹولز اور علم سے آراستہ کرنا ہے تاکہ وہ ان چینز کو انڈیکس کر سکیں جن کی وہ خواہش کرتے ہیں اور ویب 3 ایکو سسٹم کی حمایت کرتے ہیں جس میں وہ دلچسپی رکھتے ہیں۔ تاہم، ہر چین کے لیے، ٹیسٹ نیٹ سے مین نیٹ تک کے مراحل ہوتے ہیں۔ ان چینز کے تمام مراحل کو مکمل کرنا یقینی بنائیں جن کی آپ انڈیکس کر رہے ہیں۔ مراحل کے بارے میں مزید جاننے کے لیے [MIPs تصور کا صفحہ](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) دیکھیں. - -### 7. انعامات کب تقسیم کیے جائیں گے؟ - -MIPs کے انعامات فی چین تقسیم کیے جائیں گے جب کارکردگی کی پیمائشیں پوری ہو جائیں گی اور ان انڈیکسرز کی طرف سے منتقل شدہ سب گراف کو سپورٹ کیا جائے گا۔ اس چین کے چکر کے وسط میں فی چینز کل انعامات کے بارے میں معلومات تلاش کریں. - -### 8. اسکورنگ کیسے کام کرتی ہے؟ - -انڈیکسرز لیڈر بورڈ پر پورے پروگرام میں اسکورنگ کی بنیاد پر انعامات کے لیے مقابلہ کریں گے۔ پروگرام کی اسکورنگ پر مبنی ہوگی: - -**سب گراف کوریج** - -- کیا آپ فی چین سب گراف کے لیے زیادہ سے زیادہ مدد فراہم کر رہے ہیں؟ - -- MIPs کے دوران، بڑے انڈیکسرز سے توقع کی جاتی ہے کہ وہ 50%+ سب گراف فی چین جس کی وہ حمایت کرتے ہیں. - -**خدمت کا معیار** - -- کیا انڈیکسر سروس کے اچھے معیار (لیٹنسی، تازہ ڈیٹا، اپ ٹائم، وغیرہ) کے ساتھ چین کی خدمت کر رہا ہے؟ - -- کیا انڈیکسر سپورٹ کرنے والا ڈیپ ڈویلپرز ان کی ضروریات کے لیے رد عمل ظاہر کر رہا ہے؟ - -کیا انڈیکسر مؤثر طریقے سے مختص کر رہا ہے، نیٹ ورک کی مجموعی صحت میں حصہ ڈال رہا ہے؟ - -**کمیونٹی سپورٹ** - -- کیا انڈیکسر ساتھی انڈیکسرز کے ساتھ مل کر ملٹی چین کے لیے سیٹ اپ کرنے میں ان کی مدد کر رہا ہے؟ - -- کیا انڈیکسر پورے پروگرام میں بنیادی ڈویپلرز کو فیڈ بیک فراہم کر رہا ہے یا فورم میں انڈیکسرز کے ساتھ معلومات کا اشتراک کر رہا ہے؟ - -### 9. ڈسکورڈ رول کیسے تفویض کیا جائے گا؟ - -ماڈریٹرز آئندہ چند دنوں میں ذمہ داریاں تفویض کریں گے. - -### 10. 
کیا ٹیسٹ نیٹ پر پروگرام شروع کرنا اور پھر مین نیٹ پر جانا ٹھیک ہے؟ کیا آپ میرے نوڈ کی شناخت کر سکیں گے اور انعامات تقسیم کرتے وقت اسے مدنظر رکھیں گے؟ - -ہاں، درحقیقت آپ سے ایسا کرنے کی توقع کی جاتی ہے۔ کئی مراحل Görli پر ہیں اور ایک مین نیٹ پر ہے. - -### 11. آپ کس موقع پر شرکاء سے مین نیٹ تعیناتی شامل کرنے کی توقع کرتے ہیں؟ - -فیز 3 کے دوران مین نیٹ انڈیکسر رکھنے کی ضرورت ہوگی۔ اس پر مزید معلومات [جلد ہی اس تصور کے صفحے پر شیئر کی جائیں گی۔](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. کیا انعامات ویسٹنگ سے مشروط ہوں گے؟ - -پروگرام کے اختتام پر تقسیم کی جانے والی فیصد بنیان سے مشروط ہوگی۔ اس پر مزید انڈیکسر معاہدے میں شیئر کیا جائے گا. - -### 13. ایک سے زیادہ اراکین والی ٹیموں کے لیے، کیا ٹیم کے تمام اراکین کو MIPs ڈسکورڈ رول دیا جائے گا؟ - -جی ہاں - -### 14. کیا MIPs ٹیسٹ نیٹ میں حصہ لینے کے لیے گراف کیوریٹر پروگرام کے لاک ٹوکنز کا استعمال ممکن ہے؟ - -جی ہاں - -### 15. MIPs پروگرام کے دوران، کیا غلط POI پر تنازعہ کرنے کی مدت ہوگی؟ - -فیصلہ کیا جائے۔ اس بارے میں مزید تفصیلات کے لیے براہ کرم وقتاً فوقتاً اس صفحہ پر واپس جائیں یا اگر آپ کی درخواست فوری ہے تو براہ کرم info@thegraph.foundation پر ای میل کریں - -### 17. کیا ہم دو ویسٹنگ کنٹریکٹس کو جوڑ سکتے ہیں؟ - -نہیں، اختیارات یہ ہیں: آپ ایک دوسرے کو تفویض کر سکتے ہیں یا دو الگ الگ انڈیکس چلا سکتے ہیں. - -### 18. KYC سوالات؟ - -براہ کرم info@thegraph.foundation پر ای میل کریں - -### 19. میں Gnosis چین کو انڈیکس کرنے کے لیے تیار نہیں ہوں، کیا جب میں تیار ہوں تو کیا میں کود کر دوسری چین سے انڈیکس کرنا شروع کر سکتا ہوں؟ - -جی ہاں - -### 20. کیا سرورز چلانے کے لیے تجویز کردہ علاقے ہیں؟ - -ہم علاقوں کے بارے میں سفارشات نہیں دیتے ہیں۔ مقامات کا انتخاب کرتے وقت آپ سوچنا چاہیں گے کہ کرپٹو کرنسیوں کے لیے بڑی مارکیٹیں کہاں ہیں. - -### 21. "ہینڈلر گیس کی قیمت" کیا ہے؟ - -یہ ایک ہینڈلر کو انجام دینے کی لاگت کا فیصلہ کن پیمانہ ہے۔ اس کے برعکس جو نام تجویز کر سکتا ہے، اس کا تعلق بلاکچینز پر گیس کی قیمت سے نہیں ہے. 
diff --git a/website/pages/ur/network/_meta.js b/website/pages/ur/network/_meta.js index f737cf9d3c01..49858537c885 100644 --- a/website/pages/ur/network/_meta.js +++ b/website/pages/ur/network/_meta.js @@ -2,6 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: 'جائزہ', - benefits: 'فوائد', } diff --git a/website/pages/ur/querying/_meta.js b/website/pages/ur/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/ur/querying/_meta.js +++ b/website/pages/ur/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/ur/querying/graph-client/_meta.js b/website/pages/ur/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/ur/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/vi/_meta.js b/website/pages/vi/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/vi/_meta.js +++ b/website/pages/vi/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/vi/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/vi/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/vi/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. 
To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. - -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. 
- -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. 
- -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... 
-dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." 
-} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... - "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. 
On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. 
- -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. diff --git a/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index c00fc43f7a1f..000000000000 --- a/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Cài đặt bằng yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Cài đặt bằng npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. 
- -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. 
diff --git a/website/pages/vi/deploying/hosted-service.mdx b/website/pages/vi/deploying/hosted-service.mdx deleted file mode 100644 index a275dcb7b958..000000000000 --- a/website/pages/vi/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Tạo một Subgraph - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. 
- -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -Chế độ thứ hai mà `graph init` hỗ trợ là tạo một dự án mới từ một subgraph mẫu. Lệnh sau thực hiện điều này: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). 
diff --git a/website/pages/vi/deploying/subgraph-studio.mdx b/website/pages/vi/deploying/subgraph-studio.mdx deleted file mode 100644 index 9c3ebe4bb8c9..000000000000 --- a/website/pages/vi/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Lỗi không nghiêm trọng - - Ghép - -More features & networks will be added to The Graph Network incrementally. - -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). 
- -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. 
- -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/vi/developing/creating-a-subgraph.mdx b/website/pages/vi/developing/creating-a-subgraph.mdx deleted file mode 100644 index d25ce1c18c71..000000000000 --- a/website/pages/vi/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
- -![Defining a Subgraph](/img/defining-a-subgraph.png) - -Định nghĩa subgraph bao gồm một số tệp: - -- `subgraph.yaml`: một tệp YAML chứa tệp kê khai subgraph - -- `schema.graphql`: một lược đồ GraphQL xác định dữ liệu nào được lưu trữ cho subgraph của bạn và cách truy vấn nó qua GraphQL - -- `Ánh xạ AssemblyScript`: Mã [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) dịch từ dữ liệu sự kiện sang các thực thể được xác định trong lược đồ của bạn (ví dụ: `mapping.ts` trong hướng dẫn này) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Cài đặt Graph CLI - -Graph CLI được viết bằng JavaScript và bạn sẽ cần cài đặt `yarn` hoặc `npm` để dùng nó; Chúng ta sẽ giả định rằng bạn đã có yarn trong những các bước sau. - -Một khi bạn có `yarn`, cài đặt Graph CLI bằng cách chạy - -**Cài đặt bằng yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Cài đặt bằng npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## Từ Một Hợp đồng Hiện có - -Lệnh sau tạo một subgraph lập chỉ mục tất cả các sự kiện của một hợp đồng hiện có. Nó cố gắng lấy ABI hợp đồng từ Etherscan và quay trở lại yêu cầu đường dẫn tệp cục bộ. Nếu thiếu bất kỳ đối số tùy chọn nào, nó sẽ đưa bạn đến một biểu mẫu tương tác. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` là ID của subgraph của bạn trong Subgraph Studio, bạn có thể tìm thấy mã này trên trang chi tiết subgraph của mình. - -## Từ một Subgraph mẫu - -Chế độ thứ hai mà `graph init` hỗ trợ là tạo một dự án mới từ một subgraph mẫu. Lệnh sau thực hiện điều này: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## Tệp kê khai Subgraph - -Tệp kê khai subgraph `subgraph.yaml` xác định các hợp đồng thông minh lập chỉ mục subgraph của bạn, các sự kiện từ các hợp đồng này cần chú ý đến và cách ánh xạ dữ liệu sự kiện tới các thực thể mà Graph Node lưu trữ và cho phép truy vấn. Bạn có thể tìm thấy thông số kỹ thuật đầy đủ cho các tệp kê khai subgraph [tại đây](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -Đối với subgraph mẫu, `subgraph.yaml` là: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -Các mục nhập quan trọng cần cập nhật cho tệp kê khai là: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: một danh sách tất cả các tên [tính năng](#experimental-features) đã sử dụng. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: một hoặc nhiều tệp ABI được đặt tên cho hợp đồng nguồn cũng như bất kỳ hợp đồng thông minh nào khác mà bạn tương tác từ bên trong ánh xạ. - -- `dataSources.mapping.eventHandlers`: liệt kê các sự kiện hợp đồng thông minh mà subgraph này phản ứng và các trình xử lý trong ánh xạ—./src/mapping.ts trong ví dụ — biến những sự kiện này thành các thực thể trong cửa hàng. - -- `dataSources.mapping.callHandlers`: liệt kê các chức năng của hợp đồng thông minh mà subgraph này phản ứng và xử lý trong ánh xạ chuyển đổi đầu vào và đầu ra cho các lệnh gọi hàm thành các thực thể trong cửa hàng. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -Các trình kích hoạt cho nguồn dữ liệu trong một khối được sắp xếp theo quy trình sau: - -1. Trình kích hoạt sự kiện và cuộc gọi được sắp xếp đầu tiên theo chỉ mục giao dịch trong khối. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Trình kích hoạt chặn được chạy sau trình kích hoạt sự kiện và cuộc gọi, theo thứ tự chúng được xác định trong tệp kê khai. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Phiên bản | Ghi chú phát hành | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Nhận các ABI - -(Các) tệp ABI phải khớp với (các) hợp đồng của bạn. Có một số cách để lấy tệp ABI: - -- Nếu bạn đang xây dựng dự án của riêng mình, bạn có thể sẽ có quyền truy cập vào các ABI mới nhất của mình. -- Nếu bạn đang xây dựng một subgraph cho một dự án công cộng, bạn có thể tải dự án đó xuống máy tính của mình và lấy ABI bằng cách sử dụng [`truffle compile`](https://truffleframework.com/docs/truffle/overview) hoặc sử dụng solc để biên dịch. -- Bạn cũng có thể tìm thấy ABI trên [Etherscan](https://etherscan.io/), nhưng điều này không phải lúc nào cũng đáng tin cậy, vì ABI được tải lên có thể đã lỗi thời. Đảm bảo rằng bạn có ABI phù hợp, nếu không việc chạy subgraph của bạn sẽ không thành công. - -## Lược đồ GraphQL - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Xác định các Thực thể - -Trước khi xác định các thực thể (entities), điều quan trọng là phải lùi lại một bước và suy nghĩ về cách dữ liệu của bạn được cấu trúc và liên kết. 
Tất cả các truy vấn sẽ được thực hiện dựa trên mô hình dữ liệu được xác định trong lược đồ subgraph và các thực thể được lập chỉ mục bởi subgraph. Bởi vì điều này, rất tốt để xác định lược đồ subgraph theo cách phù hợp với nhu cầu của dapp của bạn. Có thể hữu ích khi hình dung các thực thể là "đối tượng chứa dữ liệu", chứ không phải là các sự kiện hoặc chức năng. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Ví dụ tốt - -Thực thể `Gravatar` bên dưới được cấu trúc xung quanh một đối tượng Gravatar và là một ví dụ điển hình về cách một thực thể có thể được xác định. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Ví dụ tồi - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Các trường tùy chọn và bắt buộc - -Các trường thực thể có thể được xác định theo yêu cầu hoặc tùy chọn. Các trường bắt buộc được biểu thị bằng `!` trong lược đồ. Nếu trường bắt buộc không được đặt trong ánh xạ, bạn sẽ nhận được lỗi này khi truy vấn trường: - -``` -Giá trị rỗng (null) được giải quyết cho trường không phải null 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Các loại vô hướng tích hợp - -#### GraphQL Vô hướng được hỗ trợ - -We support the following scalars in our GraphQL API: - -| Loại | Miêu tả | -| --- | --- | -| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Mối quan hệ thực thể - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### Mối quan hệ một-một - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### Mối quan hệ một-nhiều - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Tra cứu ngược - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Ví dụ - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Mối quan hệ nhiều-nhiều - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Ví dụ - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Xác định các Trường Tìm kiếm toàn Văn bản - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Quản lý Tính năng](#experimental-features):** Từ `specVersion` `0.0.4` và trở đi, `fullTextSearch` phải được khai báo dưới phần `features` trong tệp kê khai subgraph. - -### Các ngôn ngữ được hỗ trợ - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Mã | Từ điển | -| ------ | ----------------- | -| simple | Khái quát | -| da | Tiếng Đan Mạch | -| nl | Tiếng Hà Lan | -| en | Tiếng Anh | -| fi | Tiếng Phần Lan | -| fr | Tiếng Pháp | -| de | Tiếng Đức | -| hu | Tiếng Hungary | -| it | Tiếng Ý | -| no | Tiếng Na uy | -| pt | Portuguese | -| ro | Tiếng Rumani | -| ru | Tiếng Nga | -| es | Tiếng Tây Ban Nha | -| sv | Tiếng Thụy Điển | -| tr | Tiếng Thổ Nhĩ Kỳ | - -### Thuật toán Xếp hạng - -Supported algorithms for ordering results: - -| Thuật toán | Miêu tả | -| ------------- | ------------------------------------------------------------------------------- | -| xếp hạng | Sử dụng chất lượng đối sánh (0-1) của truy vấn toàn văn bản để sắp xếp kết quả. | -| proximityRank | Tương tự như rank nhưng cũng bao gồm các kết quả tương tự gần giống. 
| - -## Viết Ánh xạ - -The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. 
- -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Các ID được Đề xuất để tạo các Thực thể Mới - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Tạo mã - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Mẫu Nguồn Dữ liệu - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Nguồn Dữ liệu cho Hợp đồng Chính - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Mẫu Nguồn Dữ liệu cho các Hợp đồng được Tạo Tự động - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... 
other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Khởi tạo một Mẫu Nguồn Dữ liệu - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Lưu ý:** Nguồn dữ liệu mới sẽ chỉ xử lý các lệnh gọi và sự kiện cho khối mà nó được tạo và tất cả các khối tiếp theo, nhưng sẽ không xử lý dữ liệu lịch sử, tức là dữ liệu được chứa trong các khối trước đó. -> -> Nếu các khối trước đó chứa dữ liệu có liên quan đến nguồn dữ liệu mới, tốt nhất là lập chỉ mục dữ liệu đó bằng cách đọc trạng thái hiện tại của hợp đồng và tạo các thực thể đại diện cho trạng thái đó tại thời điểm nguồn dữ liệu mới được tạo. - -### Bối cảnh Nguồn Dữ liệu - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Khối Bắt đầu - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Lưu ý:** Khối tạo hợp đồng có thể được nhanh chóng tra cứu trên Etherscan: -> -> 1. Tìm kiếm hợp đồng bằng cách nhập địa chỉ của nó vào thanh tìm kiếm. -> 2. Nhấp vào băm giao dịch tạo trong phần `Contract Creator`. -> 3. 
Tải trang chi tiết giao dịch nơi bạn sẽ tìm thấy khối bắt đầu cho hợp đồng đó. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Trình xử lý lệnh gọi - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### Xác định một Trình xử lý lệnh gọi - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Chức năng Ánh xạ - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Trình xử lý Khối - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Bộ lọc được hỗ trợ - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Chức năng Ánh xạ - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Sự kiện Ẩn danh - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Tính năng | Tên | -| ----------------------------------------------------- | ---------------- | -| [Lỗi không nghiêm trọng](#non-fatal-errors) | `nonFatalErrors` | -| [Tìm kiếm toàn văn](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Ghép](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Lỗi không nghiêm trọng - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Lưu ý:** Mạng The Graph chưa hỗ trợ các lỗi không nghiêm trọng và các nhà phát triển không nên triển khai các subgraph sử dụng chức năng đó vào mạng thông qua Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Ghép vào các Subgraph Hiện có - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- Nó thêm hoặc xóa các loại thực thể -- Nó loại bỏ các thuộc tính khỏi các loại thực thể -- Nó thêm các thuộc tính nullable vào các loại thực thể -- Nó biến các thuộc tính không thể nullable thành các thuộc tính nullable -- Nó thêm giá trị vào enums -- Nó thêm hoặc xóa các giao diện -- Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai - -> **[Quản lý Tính năng](#experimental-features):** `grafting` phải được khai báo dưới`features` trong tệp kê khai subgraph. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Tổng quan - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. 
- -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. 
This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). - -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/vi/developing/creating-a-subgraph/_meta.js b/website/pages/vi/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/vi/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/vi/developing/graph-ts/_meta.js b/website/pages/vi/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/vi/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/vi/managing/deprecate-a-subgraph.mdx b/website/pages/vi/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/vi/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. 
Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/vi/mips-faqs.mdx b/website/pages/vi/mips-faqs.mdx deleted file mode 100644 index 89bcf6131bd7..000000000000 --- a/website/pages/vi/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Giới thiệu - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/vi/network/_meta.js b/website/pages/vi/network/_meta.js index cb884e6b9169..49858537c885 100644 --- a/website/pages/vi/network/_meta.js +++ b/website/pages/vi/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: 'Tổng quan', } diff --git a/website/pages/vi/querying/_meta.js b/website/pages/vi/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/vi/querying/_meta.js +++ b/website/pages/vi/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/vi/querying/graph-client/_meta.js b/website/pages/vi/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/vi/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/yo/_meta.js b/website/pages/yo/_meta.js index ac570f79abfc..f2f3b56163a5 100644 --- a/website/pages/yo/_meta.js +++ b/website/pages/yo/_meta.js @@ -1,5 +1,5 @@ import meta from '../en/_meta.js' 
export default { - ...structuredClone(meta), + ...meta, } diff --git a/website/pages/yo/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/yo/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 840ad6900998..000000000000 --- a/website/pages/yo/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: Deploying a Subgraph to the Hosted Service ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## Store the Access Token - -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. 
- -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ - -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ - -**Subtitle** - Text that will appear in subgraph cards. - -**Description** - Description of the subgraph, visible on the subgraph details page. - -**GitHub URL** - Link to the subgraph repository on GitHub. - -**Hide** - Switching this on hides the subgraph in Graph Explorer. - -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -You deploy the subgraph by running `yarn deploy` - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. - -## Redeploying a Subgraph - -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. 
Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. - -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using graph-cli - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. 
- -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. 
- -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. 
`health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Hosted service subgraph archive policy - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. - -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. 
diff --git a/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 003f158c4284..000000000000 --- a/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Deploying a Subgraph to Subgraph Studio ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to Subgraph Studio - -## Installing Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## Create your Subgraph in Subgraph Studio - -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. - -## Initialize your Subgraph - -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: - -```bash -graph init --studio -``` - -The `` value can be found on your subgraph details page in Subgraph Studio: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. 
To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. - -Here is the command that you need to use to authenticate from the CLI: - -```bash -graph auth --studio -``` - -## Deploying a Subgraph to Subgraph Studio - -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. - -Here is the CLI command that you need to use to deploy your subgraph. - -```bash -graph deploy --studio -``` - -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. - -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. diff --git a/website/pages/yo/deploying/hosted-service.mdx b/website/pages/yo/deploying/hosted-service.mdx deleted file mode 100644 index 3c11045a8e44..000000000000 --- a/website/pages/yo/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: What is the Hosted Service? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. Please read more in the [Sunrise FAQ](/sunrise) - -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). - -If you don't have an account on the hosted service, you can sign up with your GitHub account. 
Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. - -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). - -## Ṣẹda Subgraph kan - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### From an Existing Contract - -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** - -### From an Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. 
The following command does this: - -``` -graph init --from-example --product hosted-service / [] -``` - -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## Supported Networks on the hosted service - -You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/yo/deploying/subgraph-studio.mdx b/website/pages/yo/deploying/subgraph-studio.mdx deleted file mode 100644 index f2da63abff0b..000000000000 --- a/website/pages/yo/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -Welcome to your new launchpad 👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). 
- -What you can do in Subgraph Studio: - -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: - -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing - -## How to Create Your Account - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. - -## How to Create a Subgraph in Subgraph Studio - - - -## Subgraph Compatibility with The Graph Network - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: - - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting - -More features & networks will be added to The Graph Network incrementally. 
- -### Subgraph lifecycle flow - -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## Testing your Subgraph in Subgraph Studio - -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. - -## Publish your Subgraph in Subgraph Studio - -You’ve made it this far - congrats! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -Check out the video overview below as well: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. 
Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! - -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. - -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! - -## Versioning your Subgraph with the CLI - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. 
If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). - -### Automatic Archiving of Subgraph Versions - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/yo/developing/creating-a-subgraph.mdx b/website/pages/yo/developing/creating-a-subgraph.mdx deleted file mode 100644 index e38d897919f8..000000000000 --- a/website/pages/yo/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: Creating a Subgraph ---- - -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. - -![Defining a Subgraph](/img/defining-a-subgraph.png) - -The subgraph definition consists of a few files: - -- `subgraph.yaml`: a YAML file containing the subgraph manifest - -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL - -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. 
`mapping.ts` in this tutorial) - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## Install the Graph CLI - -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. - -Once you have `yarn`, install the Graph CLI by running - -**Install with yarn:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**Install with npm:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. - -## From An Existing Contract - -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. 
- -## From An Example Subgraph - -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## Add New dataSources To An Existing Subgraph - -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. - -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. - -The contract `address` will be written to the `networks.json` for the relevant network. - -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -## The Subgraph Manifest - -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -The important entries to update for the manifest are: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. 
- -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. 
An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -### Order of Triggering Handlers - -The triggers for a data source within a block are ordered using the following process: - -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. - -These ordering rules are subject to change. - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. 
- -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. 
- -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. 
- -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. 
Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| Version | Release notes | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. 
| -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### Getting The ABIs - -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: - -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. - -## The GraphQL Schema - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. - -## Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. 
All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. - -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. - -### Good Example - -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### Bad Example - -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### Optional and Required Fields - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### Built-In Scalar Types - -#### GraphQL Supported Scalars - -We support the following scalars in our GraphQL API: - -| Type | Description | -| --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. 
| -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -#### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -#### Entity Relationships - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. 
- -#### One-To-One Relationships - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### One-To-Many Relationships - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### Reverse Lookups - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### Example - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! 
-} -``` - -#### Many-To-Many Relationships - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. - -#### Example - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! 
-} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -#### Adding comments to the schema - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## Defining Fulltext Search Fields - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -### Languages supported - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | - -## Writing Mappings - -The mappings take data from a particular source and transform it into entities that are defined within your schema. 
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. - -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. - -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. - -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. 
The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. - -### Recommended IDs for Creating New Entities - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. 
- -## Code Generation - -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. - -This is done with - -```sh -graph codegen [--output-dir ] [] -``` - -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. - -Code generation does not check your mapping code in `src/mapping.ts`. 
If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. - -## Data Source Templates - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### Data Source for the Main Contract - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### Data Source Templates for Dynamically Created Contracts - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### Instantiating a Data Source Template - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. - -### Data Source Context - -Data source contexts allow passing extra configuration when instantiating a template. 
In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## Start Blocks - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. 
Load the transaction details page where you'll find the start block for that contract. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. 
Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. 
- -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## Call Handlers - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. 
These are far more performant than call handlers, and are supported on every evm network. - -### Defining a Call Handler - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### Mapping Function - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
- -## Block Handlers - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### Supported Filters - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. - -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. 
- -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** - -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### Mapping Function - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## Anonymous Events - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
- -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Experimental features - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! 
-} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### Non-fatal errors - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### Grafting onto Existing Subgraphs - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
- -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... # Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. 
It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### Overview - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### References - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/yo/developing/creating-a-subgraph/_meta.js b/website/pages/yo/developing/creating-a-subgraph/_meta.js new file mode 100644 index 000000000000..a904468b50a2 --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/creating-a-subgraph/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/yo/developing/creating-a-subgraph/advanced.mdx b/website/pages/yo/developing/creating-a-subgraph/advanced.mdx new file mode 100644 index 000000000000..45acd610f237 --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/advanced.mdx @@ -0,0 +1,555 @@ +--- +title: Advanced Subgraph Features +--- + +## Overview + +Add and implement advanced subgraph features to enhance your subgraph's build.
+ +Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: + +| Feature | Name | +| ---------------------------------------------------- | ---------------- | +| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | +| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | + +For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - fullTextSearch + - nonFatalErrors +dataSources: ... +``` + +> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. + +## Timeseries and Aggregations + +Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. + +This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. + +### Example Schema + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} + +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +### Defining Timeseries and Aggregations + +Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. 
Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. + +Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. + +#### Available Aggregation Intervals + +- `hour`: sets the timeseries period every hour, on the hour. +- `day`: sets the timeseries period every day, starting and ending at 00:00. + +#### Available Aggregation Functions + +- `sum`: Total of all values. +- `count`: Number of values. +- `min`: Minimum value. +- `max`: Maximum value. +- `first`: First value in the period. +- `last`: Last value in the period. + +#### Example Aggregations Query + +```graphql +{ + stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { + id + timestamp + sum + } +} +``` + +Note: + +To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. + +[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. + +## Non-fatal errors + +Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. + +> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. + +Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +features: + - nonFatalErrors + ... +``` + +The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: + +```graphql +foos(first: 100, subgraphError: allow) { + id +} + +_meta { + hasIndexingErrors +} +``` + +If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: + +```graphql +"data": { + "foos": [ + { + "id": "0xdead" + } + ], + "_meta": { + "hasIndexingErrors": true + } +}, +"errors": [ + { + "message": "indexing_error" + } +] +``` + +## IPFS/Arweave File Data Sources + +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. + +> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. + +### Overview + +Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. 
These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. + +This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. + +> This replaces the existing `ipfs.cat` API + +### Upgrade guide + +#### Update `graph-ts` and `graph-cli` + +File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 + +#### Add a new entity type which will be updated when files are found + +File data sources cannot access or update chain-based entities, but must update file specific entities. + +This may mean splitting out fields from existing entities into separate entities, linked together. + +Original combined entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + externalURL: String! + ipfsURI: String! + image: String! + name: String! + description: String! + type: String! + updatedAtTimestamp: BigInt + owner: User! +} +``` + +New, split entity: + +```graphql +type Token @entity { + id: ID! + tokenID: BigInt! + tokenURI: String! + ipfsURI: TokenMetadata + updatedAtTimestamp: BigInt + owner: String! +} + +type TokenMetadata @entity { + id: ID! + image: String! + externalURL: String! + name: String! + description: String! +} +``` + +If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! + +> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. 
+ +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` + +This is the data source which will be spawned when a file of interest is identified. + +```yaml +templates: + - name: TokenMetadata + kind: file/ipfs + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + file: ./src/mapping.ts + handler: handleMetadata + entities: + - TokenMetadata + abis: + - name: Token + file: ./abis/Token.json +``` + +> Currently `abis` are required, though it is not possible to call contracts from within file data sources + +The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. + +#### Create a new handler to process files + +This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
+ +The CID of the file as a readable string can be accessed via the `dataSource` as follows: + +```typescript +const cid = dataSource.stringParam() +``` + +Example handler: + +```typescript +import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' +import { TokenMetadata } from '../generated/schema' + +export function handleMetadata(content: Bytes): void { + let tokenMetadata = new TokenMetadata(dataSource.stringParam()) + const value = json.fromBytes(content).toObject() + if (value) { + const image = value.get('image') + const name = value.get('name') + const description = value.get('description') + const externalURL = value.get('external_url') + + if (name && image && description && externalURL) { + tokenMetadata.name = name.toString() + tokenMetadata.image = image.toString() + tokenMetadata.externalURL = externalURL.toString() + tokenMetadata.description = description.toString() + } + + tokenMetadata.save() + } +} +``` + +#### Spawn file data sources when required + +You can now create file data sources during execution of chain-based handlers: + +- Import the template from the auto-generated `templates` +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifiers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). + +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)).
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). + +Example: + +```typescript +import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' + +const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' +//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. + +export function handleTransfer(event: TransferEvent): void { + let token = Token.load(event.params.tokenId.toString()) + if (!token) { + token = new Token(event.params.tokenId.toString()) + token.tokenID = event.params.tokenId + + token.tokenURI = '/' + event.params.tokenId.toString() + '.json' + const tokenIpfsHash = ipfshash + token.tokenURI + //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + + token.ipfsURI = tokenIpfsHash + + TokenMetadataTemplate.create(tokenIpfsHash) + } + + token.updatedAtTimestamp = event.block.timestamp + token.owner = event.params.to.toHexString() + token.save() +} +``` + +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. + +This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. + +> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file + +Congratulations, you are using file data sources! + +#### Deploying your subgraphs + +You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
+ +#### Limitations + +File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: + +- Entities created by File Data Sources are immutable, and cannot be updated +- File Data Source handlers cannot access entities from other file data sources +- Entities associated with File Data Sources cannot be accessed by chain-based handlers + +> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! + +Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. + +#### Best practices + +If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. + +You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. + +If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. + +> We are working to improve the above recommendation, so queries only return the "most recent" version + +#### Known issues + +File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
+ +Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. + +#### Examples + +[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) + +#### References + +[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) + +## Indexed Argument Filters / Topic Filters + +> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` + +Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. + +- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. + +- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. + +### How Topic Filters Work + +When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. + +- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
+ +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract Token { + // Event declaration with indexed parameters for addresses + event Transfer(address indexed from, address indexed to, uint256 value); + + // Function to simulate transferring tokens + function transfer(address to, uint256 value) public { + // Emitting the Transfer event with from, to, and value + emit Transfer(msg.sender, to, value); + } +} +``` + +In this example: + +- The `Transfer` event is used to log transactions of tokens between addresses. +- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. +- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. + +#### Configuration in Subgraphs + +Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: + +```yaml +eventHandlers: + - event: SomeEvent(indexed uint256, indexed address, indexed uint256) + handler: handleSomeEvent + topic1: ['0xValue1', '0xValue2'] + topic2: ['0xAddress1', '0xAddress2'] + topic3: ['0xValue3'] +``` + +In this setup: + +- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. +- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. + +#### Filter Logic + +- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. +- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
+ +#### Example 1: Tracking Direct Transfers from Address A to Address B + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleDirectedTransfer + topic1: ['0xAddressA'] # Sender Address + topic2: ['0xAddressB'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. +- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. + +#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses + +```yaml +eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransferToOrFrom + topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address + topic2: ['0xAddressB', '0xAddressC'] # Receiver Address +``` + +In this configuration: + +- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. +- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. +- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. + +## Declared eth_call + +> Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. + +Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. + +This feature does the following: + +- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. 
+- Allows faster data fetching, resulting in quicker query responses and a better user experience. +- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. + +### Key Concepts + +- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. +- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. +- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). + +#### Scenario without Declarative `eth_calls` + +Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. + +Traditionally, these calls might be made sequentially: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Total time taken = 3 + 2 + 4 = 9 seconds + +#### Scenario with Declarative `eth_calls` + +With this feature, you can declare these calls to be executed in parallel: + +1. Call 1 (Transactions): Takes 3 seconds +2. Call 2 (Balance): Takes 2 seconds +3. Call 3 (Token Holdings): Takes 4 seconds + +Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. + +Total time taken = max (3, 2, 4) = 4 seconds + +#### How it Works + +1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. +2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. +3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. 
+ +#### Example Configuration in Subgraph Manifest + +Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. + +`Subgraph.yaml` using `event.address`: + +```yaml +eventHandlers: +event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) +handler: handleSwap +calls: + global0X128: Pool[event.address].feeGrowthGlobal0X128() + global1X128: Pool[event.address].feeGrowthGlobal1X128() +``` + +Details for the example above: + +- `global0X128` is the declared `eth_call`. +- The text (`global0X128`) is the label for this `eth_call` which is used when logging errors. +- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` +- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. + +`Subgraph.yaml` using `event.params` + +```yaml +calls: + - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() +``` + +### Grafting onto Existing Subgraphs + +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + +When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source). In some circumstances, it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. + +A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: + +```yaml +description: ... +graft: + base: Qm...
# Subgraph ID of base subgraph + block: 7345624 # Block number +``` + +When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. + +Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. + +The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: + +- It adds or removes entity types +- It removes attributes from entity types +- It adds nullable attributes to entity types +- It turns non-nullable attributes into nullable attributes +- It adds values to enums +- It adds or removes interfaces +- It changes for which entity types an interface is implemented + +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. 
diff --git a/website/pages/yo/developing/creating-a-subgraph/assemblyscript-mappings.mdx b/website/pages/yo/developing/creating-a-subgraph/assemblyscript-mappings.mdx new file mode 100644 index 000000000000..2ac894695fe1 --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/assemblyscript-mappings.mdx @@ -0,0 +1,113 @@ +--- +title: Writing AssemblyScript Mappings +--- + +## Overview + +The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. + +## Writing Mappings + +For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. 
+ +In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: + +```javascript +import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' +import { Gravatar } from '../generated/schema' + +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} + +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let id = event.params.id + let gravatar = Gravatar.load(id) + if (gravatar == null) { + gravatar = new Gravatar(id) + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() +} +``` + +The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id)`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id`. + +The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. + +### Recommended IDs for Creating New Entities + +It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. + +- `transfer.id = event.transaction.hash` + +- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` + +- For entities that store aggregated data, e.g., daily trade volumes, the `id` usually contains the day number.
Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like + +```typescript +let dayID = event.block.timestamp.toI32() / 86400 +let id = Bytes.fromI32(dayID) +``` + +- Convert constant addresses to `Bytes`. + +`const id = Bytes.fromHexString('0xdead...beef')` + +There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. + +### Handling of entities with identical IDs + +When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. This means that the existing entity will be updated with the values from the new entity. + +If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. + +If no value is set for a field in the new entity with the same ID, the field will result in null as well. + +## Code Generation + +In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. + +This is done with + +```sh +graph codegen [--output-dir ] [] +``` + +but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: + +```sh +# Yarn +yarn codegen + +# NPM +npm run codegen +``` + +This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. 
It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `<OUTPUT_DIR>/<DATA_SOURCE_NAME>/<ABI_FILE>.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with:
+
+```javascript
+import {
+  // The contract class:
+  Gravity,
+  // The events classes:
+  NewGravatar,
+  UpdatedGravatar,
+} from '../generated/Gravity/Gravity'
+```
+
+In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `<OUTPUT_DIR>/schema.ts`, allowing mappings to import them with
+
+```javascript
+import { Gravatar } from '../generated/schema'
+```
+
+> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph.
+
+Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find.
diff --git a/website/pages/yo/developing/creating-a-subgraph/install-the-cli.mdx b/website/pages/yo/developing/creating-a-subgraph/install-the-cli.mdx
new file mode 100644
index 000000000000..282c68973a8a
--- /dev/null
+++ b/website/pages/yo/developing/creating-a-subgraph/install-the-cli.mdx
@@ -0,0 +1,119 @@
+---
+title: Install the Graph CLI
+---
+
+> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/).
It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/network/curating/). + +## Overview + +The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/creating-a-subgraph/subgraph-manifest/) and compiles the [mappings](/creating-a-subgraph/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. + +## Getting Started + +### Install the Graph CLI + +The Graph CLI is written in TypeScript, and you must have `node` and either `npm` or `yarn` installed to use it. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. + +On your local machine, run one of the following commands: + +#### Using [npm](https://www.npmjs.com/) + +```bash +npm install -g @graphprotocol/graph-cli@latest +``` + +#### Using [yarn](https://yarnpkg.com/) + +```bash +yarn global add @graphprotocol/graph-cli +``` + +The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. + +## Create a Subgraph + +### From an Existing Contract + +The following command creates a subgraph that indexes all events of an existing contract: + +```sh +graph init \ + --product subgraph-studio + --from-contract \ + [--network ] \ + [--abi ] \ + [] +``` + +- The command tries to retrieve the contract ABI from Etherscan. + + - The Graph CLI relies on a public RPC endpoint. While occasional failures are expected, retries typically resolve this issue. 
If failures persist, consider using a local ABI.
+
+- If any of the optional arguments are missing, it guides you through an interactive form.
+
+- The `<SUBGRAPH_SLUG>` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page.
+
+### From an Example Subgraph
+
+The following command initializes a new project from an example subgraph:
+
+```sh
+graph init --from-example=example-subgraph
+```
+
+- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated.
+
+- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events.
+
+### Add New `dataSources` to an Existing Subgraph
+
+`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them.
+
+Recent versions of the Graph CLI support adding new `dataSources` to an existing subgraph through the `graph add` command:
+
+```sh
+graph add
<address> [<subgraph-path>]
+
+Options:
+
+  --abi <path>              Path to the contract ABI (default: download from Etherscan)
+  --contract-name           Name of the contract (default: Contract)
+  --merge-entities          Whether to merge entities with the same name (default: false)
+  --network-file <path>     Networks config file path (default: "./networks.json")
+```
+
+#### Specifics
+
+The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option) and create a new `dataSource`, similar to how the `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. This allows you to index implementation contracts from their proxy contracts.
+
+- The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts:
+
+  - If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`.
+
+  - If `false`: a new `entity` & `event` handler should be created with `${dataSourceName}{EventName}`.
+
+- The contract `address` will be written to the `networks.json` for the relevant network.
+
+> Note: When using the interactive CLI, after successfully running `graph init`, you'll be prompted to add a new `dataSource`.
+
+### Getting The ABIs
+
+The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files:
+
+- If you are building your own project, you will likely have access to your most current ABIs.
+- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile.
+- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail.
+ +## SpecVersion Releases + +| Version | Release notes | +| :-: | --- | +| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | +| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | +| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | +| 0.0.9 | Supports `endBlock` feature | +| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | +| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | +| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | +| 0.0.5 | Added support for event handlers having access to transaction receipts. | +| 0.0.4 | Added support for managing subgraph features. | diff --git a/website/pages/yo/developing/creating-a-subgraph/ql-schema.mdx b/website/pages/yo/developing/creating-a-subgraph/ql-schema.mdx new file mode 100644 index 000000000000..90036d1bfab9 --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/ql-schema.mdx @@ -0,0 +1,312 @@ +--- +title: The Graph QL Schema +--- + +## Overview + +The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. + +> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api/) section. + +### Defining Entities + +Before defining entities, it is important to take a step back and think about how your data is structured and linked. + +- All queries will be made against the data model defined in the subgraph schema. 
As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. +- It may be useful to imagine entities as "objects containing data", rather than as events or functions. +- You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. +- Each type that should be an entity is required to be annotated with an `@entity` directive. +- By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. + - Mutability comes at a price, so for entity types that will never be modified, such as those containing data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. + - If changes happen in the same block in which the entity was created, then mappings can make changes to immutable entities. Immutable entities are much faster to write and to query so they should be used whenever possible. + +#### Good Example + +The following `Gravatar` entity is structured around a Gravatar object and is a good example of how an entity could be defined. + +```graphql +type Gravatar @entity(immutable: true) { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String + accepted: Boolean +} +``` + +#### Bad Example + +The following example `GravatarAccepted` and `GravatarDeclined` entities are based around events. It is not recommended to map events or function calls to entities 1:1. + +```graphql +type GravatarAccepted @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} + +type GravatarDeclined @entity { + id: Bytes! + owner: Bytes + displayName: String + imageUrl: String +} +``` + +#### Optional and Required Fields + +Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. 
If the field is a scalar field, you get an error when you try to store the entity. If the field references another entity then you get this error:
+
+```
+Null value resolved for non-null field 'name'
+```
+
+Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query than those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`.
+
+For some entity types the `id` for `Bytes!` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`.
+
+### Built-In Scalar Types
+
+#### GraphQL Supported Scalars
+
+The following scalars are supported in the GraphQL API:
+
+| Type | Description |
+| --- | --- |
+| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. |
+| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. |
+| `Boolean` | Scalar for `boolean` values. |
+| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. |
+| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. |
+| `BigInt` | Large integers.
Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | +| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | + +### Enums + +You can also create enums within a schema. Enums have the following syntax: + +```graphql +enum TokenStatus { + OriginalOwner + SecondOwner + ThirdOwner +} +``` + +Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: + +More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). + +### Entity Relationships + +An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. + +Relationships are defined on entities just like any other field except that the type specified is that of another entity. + +#### One-To-One Relationships + +Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: + +```graphql +type Transaction @entity(immutable: true) { + id: Bytes! + transactionReceipt: TransactionReceipt +} + +type TransactionReceipt @entity(immutable: true) { + id: Bytes! 
+ transaction: Transaction +} +``` + +#### One-To-Many Relationships + +Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +### Reverse Lookups + +Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. + +For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. + +#### Example + +We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: + +```graphql +type Token @entity(immutable: true) { + id: Bytes! + tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") +} + +type TokenBalance @entity { + id: Bytes! + amount: Int! + token: Token! +} +``` + +#### Many-To-Many Relationships + +For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. 
+ +#### Example + +Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [User!]! +} + +type User @entity { + id: Bytes! + name: String! + organizations: [Organization!]! @derivedFrom(field: "members") +} +``` + +A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like + +```graphql +type Organization @entity { + id: Bytes! + name: String! + members: [UserOrganization!]! @derivedFrom(field: "organization") +} + +type User @entity { + id: Bytes! + name: String! + organizations: [UserOrganization!] @derivedFrom(field: "user") +} + +type UserOrganization @entity { + id: Bytes! # Set to `user.id.concat(organization.id)` + user: User! + organization: Organization! +} +``` + +This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: + +```graphql +query usersWithOrganizations { + users { + organizations { + # this is a UserOrganization entity + organization { + name + } + } + } +} +``` + +This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. + +### Adding comments to the schema + +As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: + +```graphql +type MyFirstEntity @entity { + # unique identifier and primary key of the entity + id: Bytes! + address: Bytes! 
+} +``` + +## Defining Fulltext Search Fields + +Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. + +A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. + +To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. + +```graphql +type _Schema_ + @fulltext( + name: "bandSearch" + language: en + algorithm: rank + include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] + ) + +type Band @entity { + id: Bytes! + name: String! + description: String! + bio: String + wallet: Address + labels: [Label!]! + discography: [Album!]! + members: [Musician!]! +} +``` + +The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. + +```graphql +query { + bandSearch(text: "breaks & electro & detroit") { + id + name + description + wallet + } +} +``` + +> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. + +## Languages supported + +Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. 
For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". + +Supported language dictionaries: + +| Code | Dictionary | +| ------ | ---------- | +| simple | General | +| da | Danish | +| nl | Dutch | +| en | English | +| fi | Finnish | +| fr | French | +| de | German | +| hu | Hungarian | +| it | Italian | +| no | Norwegian | +| pt | Portuguese | +| ro | Romanian | +| ru | Russian | +| es | Spanish | +| sv | Swedish | +| tr | Turkish | + +### Ranking Algorithms + +Supported algorithms for ordering results: + +| Algorithm | Description | +| ------------- | ----------------------------------------------------------------------- | +| rank | Use the match quality (0-1) of the fulltext query to order the results. | +| proximityRank | Similar to rank but also includes the proximity of the matches. | diff --git a/website/pages/yo/developing/creating-a-subgraph/starting-your-subgraph.mdx b/website/pages/yo/developing/creating-a-subgraph/starting-your-subgraph.mdx new file mode 100644 index 000000000000..5127f01632aa --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/starting-your-subgraph.mdx @@ -0,0 +1,21 @@ +--- +title: Starting Your Subgraph +--- + +## Overview + +The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. + +When you create a [subgraph](/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. + +Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. + +### Start Building + +Start the process and build a subgraph that matches your needs: + +1. [Install the CLI](/developing/creating-a-subgraph/install-the-cli/) - Set up your infrastructure +2. 
[Subgraph Manifest](/developing/creating-a-subgraph/subgraph-manifest/) - Understand a subgraph's key component +3. [The Graph Ql Schema](/developing/creating-a-subgraph/ql-schema/) - Write your schema +4. [Writing AssemblyScript Mappings](/developing/creating-a-subgraph/assemblyscript-mappings/) - Write your mappings +5. [Advanced Features](/developing/creating-a-subgraph/advanced/) - Customize your subgraph with advanced features diff --git a/website/pages/yo/developing/creating-a-subgraph/subgraph-manifest.mdx b/website/pages/yo/developing/creating-a-subgraph/subgraph-manifest.mdx new file mode 100644 index 000000000000..7476b7175d57 --- /dev/null +++ b/website/pages/yo/developing/creating-a-subgraph/subgraph-manifest.mdx @@ -0,0 +1,534 @@ +--- +title: Subgraph Manifest +--- + +## Overview + +The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. + +The **subgraph definition** consists of the following files: + +- `subgraph.yaml`: Contains the subgraph manifest + +- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL + +- `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) + +### Subgraph Capabilities + +A single subgraph can: + +- Index data from multiple smart contracts (but not multiple networks). + +- Index data from IPFS files using File Data Sources. + +- Add an entry for each contract that requires indexing to the `dataSources` array. + +The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
+ +For the example subgraph listed above, `subgraph.yaml` is: + +```yaml +specVersion: 0.0.4 +description: Gravatar for Ethereum +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + abi: Gravity + startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + - event: UpdatedGravatar(uint256,address,string,string) + handler: handleUpdatedGravatar + callHandlers: + - function: createGravatar(string,string) + handler: handleCreateGravatar + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCall + filter: + kind: call + file: ./src/mapping.ts +``` + +## Subgraph Entries + +> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/developing/creating-a-subgraph/ql-schema/). + +The important entries to update for the manifest are: + +- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. + +- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. + +- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. + +- `features`: a list of all used [feature](#experimental-features) names. 
+ +- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. + +- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. + +- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. + +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + +- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. + +- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. + +- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. + +- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
+ +- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. + +A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. + +## Event Handlers + +Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. + +### Defining an Event Handler + +An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: dev + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + eventHandlers: + - event: Approval(address,address,uint256) + handler: handleApproval + - event: Transfer(address,address,uint256) + handler: handleTransfer + topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. +``` + +## Call Handlers + +While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. 
In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured.
+
+Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract.
+
+> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network.
+
+### Defining a Call Handler
+
+To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to.
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: Gravity
+    network: mainnet
+    source:
+      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
+      abi: Gravity
+    mapping:
+      kind: ethereum/events
+      apiVersion: 0.0.6
+      language: wasm/assemblyscript
+      entities:
+        - Gravatar
+        - Transaction
+      abis:
+        - name: Gravity
+          file: ./abis/Gravity.json
+      callHandlers:
+        - function: createGravatar(string,string)
+          handler: handleCreateGravatar
+```
+
+The `function` is the normalized function signature to filter calls by.
The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. + +### Mapping Function + +Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: + +```typescript +import { CreateGravatarCall } from '../generated/Gravity/Gravity' +import { Transaction } from '../generated/schema' + +export function handleCreateGravatar(call: CreateGravatarCall): void { + let id = call.transaction.hash + let transaction = new Transaction(id) + transaction.displayName = call.inputs._displayName + transaction.imageUrl = call.inputs._imageUrl + transaction.save() +} +``` + +The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. + +## Block Handlers + +In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. + +### Supported Filters + +#### Call Filter + +```yaml +filter: + kind: call +``` + +_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ + +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. 
+ +The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. + +```yaml +dataSources: + - kind: ethereum/contract + name: Gravity + network: dev + source: + address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' + abi: Gravity + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + - Transaction + abis: + - name: Gravity + file: ./abis/Gravity.json + blockHandlers: + - handler: handleBlock + - handler: handleBlockWithCallToContract + filter: + kind: call +``` + +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Mapping Function + +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
+ +```typescript +import { ethereum } from '@graphprotocol/graph-ts' + +export function handleBlock(block: ethereum.Block): void { + let id = block.hash + let entity = new Block(id) + entity.save() +} +``` + +## Anonymous Events + +If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: + +```yaml +eventHandlers: + - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) + topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' + handler: handleGive +``` + +An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. + +## Transaction Receipts in Event Handlers + +Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. + +To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. + +```yaml +eventHandlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar + receipt: true +``` + +Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. + +## Order of Triggering Handlers + +The triggers for a data source within a block are ordered using the following process: + +1. Event and call triggers are first ordered by transaction index within the block. +2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. +3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. + +These ordering rules are subject to change. 
+> **Note:** When new [dynamic data sources](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered.
Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. + +```yaml +dataSources: + - kind: ethereum/contract + name: Factory + # ... other source fields for the main contract ... +templates: + - name: Exchange + kind: ethereum/contract + network: mainnet + source: + abi: Exchange + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/exchange.ts + entities: + - Exchange + abis: + - name: Exchange + file: ./abis/exchange.json + eventHandlers: + - event: TokenPurchase(address,uint256,uint256) + handler: handleTokenPurchase + - event: EthPurchase(address,uint256,uint256) + handler: handleEthPurchase + - event: AddLiquidity(address,uint256,uint256) + handler: handleAddLiquidity + - event: RemoveLiquidity(address,uint256,uint256) + handler: handleRemoveLiquidity +``` + +### Instantiating a Data Source Template + +In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + // Start indexing the exchange; `event.params.exchange` is the + // address of the new exchange contract + Exchange.create(event.params.exchange) +} +``` + +> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> +> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. 
+ +### Data Source Context + +Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: + +```typescript +import { Exchange } from '../generated/templates' + +export function handleNewExchange(event: NewExchange): void { + let context = new DataSourceContext() + context.setString('tradingPair', event.params.tradingPair) + Exchange.createWithContext(event.params.exchange, context) +} +``` + +Inside a mapping of the `Exchange` template, the context can then be accessed: + +```typescript +import { dataSource } from '@graphprotocol/graph-ts' + +let context = dataSource.context() +let tradingPair = context.getString('tradingPair') +``` + +There are setters and getters like `setString` and `getString` for all value types. + +## Start Blocks + +The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. + +```yaml +dataSources: + - kind: ethereum/contract + name: ExampleSource + network: mainnet + source: + address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' + abi: ExampleContract + startBlock: 6627917 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + file: ./src/mappings/factory.ts + entities: + - User + abis: + - name: ExampleContract + file: ./abis/ExampleContract.json + eventHandlers: + - event: NewEvent(address,address) + handler: handleNewEvent +``` + +> **Note:** The contract creation block can be quickly looked up on Etherscan: +> +> 1. 
Search for the contract by entering its address in the search bar. +> 2. Click on the creation transaction hash in the `Contract Creator` section. +> 3. Load the transaction details page where you'll find the start block for that contract. + +## Indexer Hints + +The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. + +> This feature is available from `specVersion: 1.0.0` + +### Prune + +`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: + +1. `"never"`: No pruning of historical data; retains the entire history. +2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. +3. A specific number: Sets a custom limit on the number of historical blocks to retain. + +``` + indexerHints: + prune: auto +``` + +> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. + +History as of a given block is required for: + +- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history +- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block +- Rewinding the subgraph back to that block + +If historical data as of the block has been pruned, the above capabilities will not be available. + +> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
+ +For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: + +To retain a specific amount of historical data: + +``` + indexerHints: + prune: 1000 # Replace 1000 with the desired number of blocks to retain +``` + +To preserve the complete history of entity states: + +``` +indexerHints: + prune: never +``` diff --git a/website/pages/yo/developing/graph-ts/_meta.js b/website/pages/yo/developing/graph-ts/_meta.js new file mode 100644 index 000000000000..466762da9ce8 --- /dev/null +++ b/website/pages/yo/developing/graph-ts/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/developing/graph-ts/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/yo/managing/deprecate-a-subgraph.mdx b/website/pages/yo/managing/deprecate-a-subgraph.mdx deleted file mode 100644 index 034db6a1c8ee..000000000000 --- a/website/pages/yo/managing/deprecate-a-subgraph.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deprecate a Subgraph ---- - -## Deprecating a Subgraph - -Although you cannot delete a subgraph, you can deprecate it on Graph Explorer. - -### Step-by-Step - -To deprecate your subgraph, do the following: - -1. Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. 
-- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/yo/mips-faqs.mdx b/website/pages/yo/mips-faqs.mdx deleted file mode 100644 index ae460989f96e..000000000000 --- a/website/pages/yo/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIPs FAQs ---- - -## Introduction - -> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! - -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/yo/querying/_meta.js b/website/pages/yo/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/yo/querying/_meta.js +++ b/website/pages/yo/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/yo/querying/graph-client/_meta.js b/website/pages/yo/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/yo/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/zh/_meta.js b/website/pages/zh/_meta.js index 7493d5d5282c..f2f3b56163a5 100644 --- a/website/pages/zh/_meta.js +++ b/website/pages/zh/_meta.js @@ -1,21 +1,5 @@ import meta from '../en/_meta.js' export default { - ...structuredClone(meta), - network: 'Graph网络', - '###1': { - type: 'heading', - title: '子图', - }, - developing: '开发', - deploying: '部署', - publishing: '发布', - managing: '管理', - querying: '查询', - cookbook: '导览', - 'release-notes': '发布说明&更新向导', - '###3': { - type: 'heading', - title: '索引', - }, + 
...meta, } diff --git a/website/pages/zh/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/zh/deploying/deploying-a-subgraph-to-hosted.mdx deleted file mode 100644 index 4a3aa14316f5..000000000000 --- a/website/pages/zh/deploying/deploying-a-subgraph-to-hosted.mdx +++ /dev/null @@ -1,297 +0,0 @@ ---- -title: 将子图部署到托管服务上 ---- - -> Hosted service endpoints will no longer be available after June 12th 2024. [Learn more](/sunrise). - -This page explains how to deploy a subgraph to the hosted service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). - -## Create a hosted service account - -Before using the hosted service, create an account in our hosted service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [hosted service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. - -## 存储访问令牌 - -创建帐户后,导航到您的 [仪表板](https://thegraph.com/hosted-service/dashboard)。 复制仪表板上显示的访问令牌并运行 `graph auth --product hosted-service `。 这会将访问令牌存储在您的计算机上。 如果您不需要重新生成访问令牌,您就只需要这样做一次。 - -## Create a Subgraph on the hosted service - -Before deploying the subgraph, you need to create it in Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _Add Subgraph_ button and fill in the information below as appropriate: - -**Image** - 选择要用作子图的预览图和缩略图的图像。 - -**Subgraph Name** - 子图名称连同下面将要创建的子图账户名称,将定义用于部署和 GraphQL 端点的`account-name/subgraph-name`样式名称。 _此字段以后无法更改。_ - -**Account** - 创建子图的账户。 这可以是个人或组织的账户。 _以后不能在账户之间移动子图。_ - -**Subtitle** - 将出现在子图卡中的文本。 - -**Description** - 子图的描述,在子图详细信息页面上可见。 - -**GitHub URL** - 存储在 GitHub 上的子图代码的链接。 - -**Hide** - Switching this on hides the subgraph in Graph Explorer. 
- -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Creating a Subgraph section](/developing/creating-a-subgraph/). - -## Deploy a Subgraph on the hosted service - -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell Graph Explorer to start indexing your subgraph using these files. - -您可以通过运行 `yarn deploy`来部署子图。 - -After deploying the subgraph, Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. - -一旦Graph节点从历史区块中提取了所有数据,子图状态就会切换到`Synced`。在挖掘这些区块时,Graph节点将继续检查子图的区块。 - -## 重新部署子图 - -例如,在更改子图定义以修复实体映射中的问题时,再次运行上面的 `yarn deploy` 命令以部署子图的更新版本。子图的任何更新都需要 Graph 节点 重新索引整个子图,再次从 gensis 区块开始。 - -如果您之前部署的子图仍处于`Syncing`状态,系统则会立即将其替换为新部署的版本。 如果之前部署的子图已经完全同步,Graph 节点会将新部署的版本标记为`Pending Version`,在后台进行同步,只有在新版本同步完成后,才会用新的版本替换当前部署的版本。 这样做可以确保在新版本同步时您仍然有子图可以使用。 - -## 将子图部署到多个网络 - -在某些情况下,您需要将相同的子图部署到多个网络,而不复制其所有代码。随之而来的主要挑战是这些网络上的合约地址不同。 - -### 使用graph-cli - -`graph build`(从`0.29.0版本`开始)和`graph deploy`(从`0.32.0版本`开始)都接受两个新选项: - -```sh -选项: - -... 
- ---network 从网络配置文件中使用的网络配置 - ---networkfile 网络配置文件路径(默认值:“/networks.json”) -``` - -您可以使用`--network`选项从`json`标准文件(默认为`networks.json`)中指定网络配置,以便在开发期间轻松更新子图。 - -**注意: **`init` 命令现在将根据提供的信息自动生成 `networks.json`。然后,您就可以更新现有的或添加其他网络。 - -如果您没有 `networks.json` 文件,您则需要手动创建一个具有以下结构的文件: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -**注意**:您不必在配置文件中指定任何`模板`(如果有),只需指定`dataSources`。如果`subgraph.yaml`文件中声明了任何`模板`,则其网络将自动更新为`--network`选项指定的模板。 - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -您的网络配置文件应该是这样的: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -现在我们可以运行以下命令之一: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -现在可以进行 `yarn deploy`了。 - -**注意: **如前所述,由于 `graph-cli 0.32.0`,您可以使用 `--network`选项直接运行`yarn deploy`: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### 使用 subgraph.yaml 模板 - -对于允许参数化诸如合约地址等方面的旧的 graph-cli 版本,一种解决方案是使用诸如 [Mustache](https://mustache.github.io/) 或 [Handlebar](https://handlebarsjs.com/) 之类的模板系统生成部分内容。 - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -和 - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -除此之外,您还可以使用可变占位符`{{network}}` 和 `{{address}}`替换清单中的网络名称和地址,并将清单重命名为,例如 `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -为了向任何一个网络生成一个清单,您可以向 `package.json` 添加两个额外的命令以及对`mustache`的依赖: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -[这里](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759)可以找到一个实际的例子。 - -**注意: **这种方法也可以应用于更复杂的情况,在这种情况下,需要替换的不仅仅是合约地址和网络名称,或者也需要从模板生成映射或 ABI。 - -## 检查子图状态 - -如果子图成功同步,这是一个好信号,表明它将永远运行良好。然而,网络上的新触发器可能会导致子图遇到未经测试的错误条件,或者由于性能问题或节点操作符的问题,子图开始落后。 - -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -这将为您提供 `chainHeadBlock`,您可以将其与子图上的 `latestBlock` 进行比较,以检查它是否在后面运行。如果子图已经赶上了链将获取`synced`。如果没有错误发生,`health` 可以获取`healthy` 值,如果有错误发生则获取`failed` 值。在这种情况下,可以检查`fatalError`字段以获得有关此错误的详细信息。 - -## 托管服务子图封存策略 - -The hosted service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. - -To improve the performance of the service for active subgraphs, the hosted service will archive subgraphs that are inactive. 
- -**A subgraph is defined as "inactive" if it was deployed to the hosted service more than 45 days ago, and if it has received 0 queries in the last 45 days.** - -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's hosted service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. - -## 子图工作室子图封存策略 - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -受此策略影响的每个子图都有一个选项,可以回复有问题的版本。 diff --git a/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx deleted file mode 100644 index 29be61ca18c9..000000000000 --- a/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: 将子图部署到子图工作室 ---- - -These are the steps to deploy your subgraph to Subgraph Studio: - -- 安装 Graph CLI(同时使用 yarn 和 npm) -- 在子图工作室中创建你的子图 -- 从 CLI 认证你的账户 -- 将子图部署到子图工作室 - -## 安装 Graph CLI - -There is a CLI to deploy subgraphs to [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install `graph-cli`. This can be done using npm or yarn. 
- -**用 yarn 安装:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**用 npm 安装:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## 在子图工作室中创建你的子图 - -在部署您的实际子图之前,您需要在[子图工作室](https://thegraph.com/studio/)创建一个子图。我们建议您阅读我们的 [工作室文档](/deploying/subgraph-studio)以了解更多有关这方面的信息。 - -## 初始化你的子图 - -一旦你的子图在子图工作室中被创建,你可以用这个命令初始化子图代码。 - -```bash -graph init --studio -``` - -``值可以在 子图工作室 中你的子图详情页上找到。 - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -运行`graph init` 之后,您将被要求输入要查询的合约地址、网络和 ABI。这样做将在本地计算机上生成一个新文件夹,其中包含一些基本代码,以便开始处理子图。然后您可以完成您的子图,以确保它按照预期工作。 - -## Graph 认证 - -在将你的子图部署到子图工作室之前,你需要在 CLI 内登入你的账户。要做到这一点,您将需要您的部署密钥,您可以找到您的“我的子图”页面或您的子图详细信息页面。 - -以下是你从 CLI 进行认证需要使用的命令: - -```bash -graph auth --studio -``` - -## 将子图部署到子图工作室 - -一旦你准备好了,你可以将你的子图部署到子图工作室。 这样做不会将你的子图发布到去中心化的网络中,它只会将它部署到你的工作室账户中,在那里你将能够测试它并更新元数据。 - -这里是你部署子图需要使用的 CLI 命令。 - -```bash -graph deploy --studio -``` - -运行这个命令后,CLI 会要求提供一个版本标签,你可以随意命名,你可以使用 `0.1`和 `0.2`这样的标签,或者也可以使用字母,如 `uniswap-v2-0.1` 。这些标签将在 Graph Explorer 中可见,并可由策展人用来决定是否要在这个版本上发出信号,所以要明智地选择它们。 - -一旦部署完毕,你可以在子图工作室中使用控制面板测试你的子图,如果需要的话,可以部署另一个版本,更新元数据,当你准备好后,将你的子图发布到Graph浏览器。 diff --git a/website/pages/zh/deploying/hosted-service.mdx b/website/pages/zh/deploying/hosted-service.mdx deleted file mode 100644 index 3f8f65646c02..000000000000 --- a/website/pages/zh/deploying/hosted-service.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: 什么是托管服务? ---- - -> Please note, hosted service endpoints will no longer be available after June 12th 2024 as all subgraphs will need to upgrade to The Graph Network. 
Please read more in the [Sunrise FAQ](/sunrise) - -本节将引导您完成将子图部署到[托管服务](https://thegraph.com/hosted-service/)的过程。 - -如果您在托管服务上没有账户,您可以用您的 Github 账户注册。一旦进行了身份验证,就可以开始通过 UI 创建子图,并在终端部署它们。托管服务支持许多网络,如 Polygon、 Gnosis Chain、 BNB Chain、 Optimism 和 Arbitrum等。 - -有关详细列表,请参阅[支持的网络](/developing/supported-networks/#hosted-service)。 - -## 创建子图 - -First follow the instructions [here](/developing/creating-a-subgraph/#install-the-graph-cli) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` - -### 基于现有合约 - -如果您已经将智能合约部署到以太坊主网或其中一个测试网,那么从该合约中引导一个新的子图可能是开始使用托管服务的好方法。 - -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from the block explorer. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from the block explorer, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. - -```sh ---network \ ---abi \ -``` - -在本例中,``是您的 GitHub 用户或组织名称,``是您的子图名称,``是Graph init 放置示例子图清单目录的可选名称。`` 是您现有合约的地址。`` 是合约所依赖的网络的名称。``是合约 ABI 文件的本地路径。**网络和 abi 都是可选的。** - -### 基于子图示例 - -`graph init` 支持的第二种模式是从示例子图创建新项目。 以下命令执行此操作: - -``` -graph init --from-example --product hosted-service / [] -``` - -示例子图基于 Dani Grant 的 Gravity 合约,该合约管理用户头像,并在创建或更新头像时发出 `NewGravatar` 或 `UpdateGravatar` 事件。子图通过将 `Gravatar` 实体写入 Graph 节点存储并确保根据事件更新它们来处理这些事件。继续查看[子图清单](/developing/creating-a-subgraph#the-subgraph-manifest),以便更好地理解智能合约中需要关注的事件、映射等等。 - -### From a Proxy Contract - -To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. 
You can use the command below. - -```sh -graph init \ - --product hosted-service - --from-contract \ - / [] -``` - -## 托管服务支持的网络 - -您可以在[这里](/developing/supported-networks)找到支持的网络列表。 diff --git a/website/pages/zh/deploying/subgraph-studio.mdx b/website/pages/zh/deploying/subgraph-studio.mdx deleted file mode 100644 index 5aca9ee96f83..000000000000 --- a/website/pages/zh/deploying/subgraph-studio.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: How to Use Subgraph Studio ---- - -欢迎使用您的新启动板👩🏽‍🚀 - -Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). - -What you can do in Subgraph Studio: - -- 通过 Studio UI 创建子图 -- 使用 CLI 部署子图 -- 使用 Studio UI 发布子图 -- 在面板上测试子图 -- 使用查询 URL 将其集成到暂存中 -- 为特定子图创建和管理 API 密钥 - -Here in Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. - -查询子图会产生查询费用,用于奖励Graph网络上的[索引人](/network/indexing)。如果您是一个 dapp 开发人员或子图开发人员,工作室将授权您构建更好的子图,以支持您或您的社区的查询。工作室由5个主要部分组成: - -- 您的用户账户控件 -- 您创建的子图列表 -- 管理、查看细节及可视化特定子图状态的部分 -- 用于管理 API 密钥的部分,您需要查询子图 -- 管理账单的部分 - -## 如何创建您的账户 - -1. Sign in with your wallet - you can do this via MetaMask, WalletConnect, Coinbase Wallet or Safe. -1. 一旦您登录,您将在您的账户主页上看到您唯一的部署密钥。这将允许您发布您的子图或管理您的 API 密钥 + 计费。您将拥有一个惟一的部署密钥,如果您认为该密钥已被破坏,则可以重新生成该密钥。 - -## 如何在子图工作室中创建子图 - - - -## 子图与图形网络的兼容性 - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- 索引[支持的网络](/developing/supported-networks) -- 不得使用以下任何功能: - - ipfs.cat & ipfs.map - - 非致命错误 - - 嫁接 - -更多功能& 和网络将逐渐添加到图形网络。 - -### 子图生命周期流程 - -![子图生命周期](/img/subgraph-lifecycle.png) - -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli), or command-line interface. 
Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (psst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. - -## 在子图工作室中测试您的子图 - -如果您想在将子图发布到网络之前对其进行测试,您可以在子图 **面板** 中执行此操作或查看您的日志。 子图日志将告诉您**在哪里**您的子图在发生什么情况下失败。 - -## 在子图工作室中发布您的子图 - -你已经走到了这一步——祝贺你! - -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [section](/publishing/publishing-a-subgraph/). - -还可以查看下面的视频概述: - - - -Remember, while you’re going through your publishing flow, you’ll be able to push to either Arbitrum One or Arbitrum Sepolia. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Arbitrum Sepolia, which is free to do. This will allow you to see how the subgraph will work in Graph Explorer and will allow you to test curation elements. - -索引人需要提交特定区块hash的索引记录的强制性证明。因为发布子图是在链上执行的操作,所以请记住,交易可能需要几分钟的时间才能完成。用于发布合同的任何地址都将是唯一能够发布未来版本的地址。明智地选择! - -具有策展信号的子图显示给索引人,以便它们可以在去中心化网络上被索引。 您可以在一个交易中发布子图和信号,这允许您在子图上创建第一个策展信号并节省费用成本。 通过将您的信号添加到策展人稍后提供的信号中,您的子图也将有更高的机会最终提供查询。 - -**现在您已经发布了您的子图,让我们来看看您将如何定期管理它们。**请注意,如果您的子图有 同步失败。 这通常是因为子图有错误 - 日志会告诉您这些问题存在于哪里! - -## 使用 CLI 对子图进行版本控制 - -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to Graph Explorer. 
This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. - -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. - -请注意,在网络上发布子图的新版本是有成本的。除了交易费用,开发人员还必须为自动迁移信号的部分策展税提供资金。如果策展人没有在子图上发出信号,则不能发布子图的新版本。更多关于策展风险的信息,请点击[这里](/network/curating)阅读。 - -### 子图版本的自动归档 - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. 
- -![子图工作室 - 取消归档](/img/Unarchive.png) diff --git a/website/pages/zh/developing/creating-a-subgraph.mdx b/website/pages/zh/developing/creating-a-subgraph.mdx deleted file mode 100644 index d74984eefbde..000000000000 --- a/website/pages/zh/developing/creating-a-subgraph.mdx +++ /dev/null @@ -1,1601 +0,0 @@ ---- -title: 创建子图 ---- - -子图从区块链中提取数据,对其进行处理并存储,以便通过 GraphQL 轻松查询。 - -![定义子图](/img/defining-a-subgraph.png) - -子图定义由几个文件组成: - -- `subgraph.yaml`: 包含子图清单的 YAML 文件 - -- `schema.graphql`: 一个 GraphQL 模式文件,它定义了为您的子图存储哪些数据,以及如何通过 GraphQL 查询这些数据 - -- `AssemblyScript映射`: 将事件数据转换为模式中定义的实体(例如本教程中的`mapping.ts`)的 [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) 代码 - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [3,000 GRT](/sunrise/#how-can-i-ensure-high-quality-of-service-and-redundancy-for-subgraphs-on-the-graph-network). - -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-tooling) which you will need to build and deploy a subgraph. - -## 安装 Graph CLI - -Graph CLI 是使用 JavaScript 编写的,您需要安装`yarn`或 `npm`才能使用它;以下教程中假设您已经安装了 yarn。 - -一旦您安装了`yarn`,可以通过运行以下命令安装 Graph CLI - -**用 yarn 安装:** - -```bash -yarn global add @graphprotocol/graph-cli -``` - -**用 npm 安装:** - -```bash -npm install -g @graphprotocol/graph-cli -``` - -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph in Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. 
- -## 基于现有合约 - -以下命令创建一个索引现有合约的所有事件的子图。 它尝试从 Etherscan 获取合约 ABI 并回退到请求本地文件路径。 如果缺少任何可选参数,它会带您进入交互式表单。 - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -`` 是您在 Subgraph Studio 中的子图 ID,可以在您的子图详细信息页面上找到。 - -## 基于子图示例 - -`graph init` 支持的第二种模式是从示例子图创建新项目。 以下命令执行此操作: - -```sh -graph init --studio -``` - -The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. - -## 将新数据源添加到现有子图 - -从`v0.31.0`开始,`graph cli`支持通过`graph add`命令向现有子图添加新的数据源。 - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -`add` 命令将从 Etherscan 获取 ABI(除非使用 `--abi` 选项指定 ABI 路径),并创建一个新的 `dataSource` 与 `graph init` 命令创建 `dataSource` `--from-contract` 的方式相同,相应地更新架构和映射。 - -`--merge-实体`选项标识开发人员希望如何处理`实体`和`事件`名称冲突: - -- 如果为`true`:新的`数据源`应该使用现有的`事件处理程序`& 和`实体`。 -- 如果为`false`:应使用`${dataSourceName}{EventName}`创建新的实体& 和事件处理程序。 - -合约`地址`将写入相关网络的`networks.json`。 - -> **注意:**使用交互式cli时,在成功运行`graph init`后,将提示您添加新的`dataSource`。 - -## 子图清单文件 - -子图清单 `subgraph.yaml` 定义了您的子图索引的智能合约,这些合约中需要关注的事件,以及如何将事件数据映射到 Graph 节点存储并允许查询的实体。 子图清单的完整规范可以在[这里](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md)找到。 - -对于示例子图,`subgraph.yaml` 的内容是: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -清单中要更新的重要条目是: - 
-- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`:所有使用的 [功能](#experimental-features) 名称的列表。 - -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. - -- `dataSources.source`:智能合约子图源的地址,以及要使用的智能合约的ABI。 地址是可选的; 省略它允许索引来自所有合约的匹配事件。 - -- `dataSources.source.startBlock`:数据源开始索引的区块的可选编号。 在大多数情况下,我们建议使用创建合约的区块。 - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. 
- -- `dataSources.mapping.entities`:数据源写入存储的实体。 每个实体的模式在 schema.graphql 文件中定义。 - -- `dataSources.mapping.abis`:源合约以及您在映射中与之交互的任何其他智能合约的一个或多个命名 ABI 文件。 - -- `dataSources.mapping.eventHandlers`:列出此子图响应的智能合约事件,映射中的处理程序—示例中为./src/mapping.ts—也将这些事件转换为存储中的实体。 - -- `dataSources.mapping.callHandlers`:列出此子图响应的智能合约函数以及映射中的处理程序,该映射将输入和输出转换为函数调用到存储中的实体。 - -- `dataSources.mapping.blockHandlers`:列出此子图响应的区块以及映射中的处理程序,以便在将区块附加到链时运行。 如果没有过滤器,区块处理程序将在每个区块中运行。 可以通过向处理程序添加为以下类型字段提供可选的调用`过滤器`:`call`。 如果区块包含至少一个对数据源合约的调用,则调用过滤器将运行处理程序。 - -通过为每个需要将数据索引到 `dataSources` 数组的合约添加一个条目,单个子图可以索引来自多个智能合约的数据。 - -### Order of Triggering Handlers - -区块内数据源的触发器使用以下流程进行排序: - -1. 事件和调用触发器首先按区块内的交易索引排序。 -2. 同一交易中的事件和调用触发器使用约定进行排序:首先是事件触发器,然后是调用触发器,每种类型都遵循它们在清单中定义的顺序。 -3. 区块触发器按照它们在清单中定义的顺序,在事件和调用触发器之后运行。 - -这些排序规则可能会发生变化。 - -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -#### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. 
This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. - -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -##### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. 
-- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. - -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0`. Currently, `eth_calls` can only be declared for event handlers. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. 
- -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. -- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. 
Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. - -### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text before colon(`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### SpecVersion Releases - -| 版本 | Release 说明 | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](developing/creating-a-subgraph/#once-filter). 
| -| 0.0.7 | Added support for [File Data Sources](developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | - -### 获取 ABI - -ABI 文件必须与您的合约相匹配。 获取 ABI 文件的方法有以下几种: - -- 如果您正在构建自己的项目,您可以获取最新的 ABI。 -- 如果您正在为公共项目构建子图,则可以将该项目下载到您的计算机,并通过使用 [`truffle compile`](https://truffleframework.com/docs/truffle/overview),或使用 solc 进行编译来获取 ABI。 -- 您还可以在 [Etherscan](https://etherscan.io/) 上找到 ABI,但这并不总是可靠的,因为在那里上传的 ABI 可能已过期。 请确保您拥有正确的 ABI,否则您的子图将会运行失败。 - -## GraphQL 模式 - -您子图的模式定义位于文件 `schema.graphql` 中。 GraphQL 模式是使用 GraphQL 接口定义语言定义的。 如果您从未编写过 GraphQL 模式,建议您在 GraphQL 类型系统上查看入门教程。 GraphQL 模式的参考文档可以在 [GraphQL API](/querying/graphql-api) 部分中找到。 - -## 定义实体 - -在定义实体之前,重要的是要退后一步,思考数据的结构和链接方式。 所有查询都将针对子图模式中定义的数据模型和子图索引的实体进行。 因此,最好以符合 dapp 需求的方式定义子图模式。 将实体想象为“包含数据的对象”,而不是事件或函数,可能很有用。 - -使用 Graph,您只需在 `schema.Graphql` 中定义实体类型,Graph 节点将生成顶级字段,用于查询该实体类型的单个实例和集合。应该是一个实体的每个类型都需要用`@entity` 指令进行注释。默认情况下,实体是可变的,这意味着映射可以加载现有实体,修改它们并存储该实体的新版本。可变性是有代价的,对于已知永远不会被修改的实体类型,例如,因为它们只是包含从链中逐字提取的数据,建议使用`@entity (immutable: true)`将它们标记为不可变的。只要这些更改发生在创建实体的同一区块中,映射就可以对不可变实体进行更改。不可变实体的写入和查询速度要快得多,因此应尽可能使用它们。 - -### 好代码的例子 - -下面的 `Gravatar` 实体围绕 Gravatar 对象构建,是如何定义实体的一个很好的示例。 - -```graphql -type Gravatar @entity { - id: Id! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -### 坏榜样 - -下面的示例中,`GravatarAccepted` 和 `GravatarDeclined` 实体都基于事件。 不建议将事件或函数调用以 1:1 的方式映射到实体。 - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! 
- owner: Bytes - displayName: String - imageUrl: String -} -``` - -### 可选和必选字段 - -实体字段可以定义为必选或可选。 必选字段由模式中的 `!` 指示。 如果映射中未设置必选字段,则在查询该字段时会收到此错误: - -``` -Null value resolved for non-null field 'name' -``` - -每个实体必须有一个 `id` 字段,其类型必须是 `Bytes!`或者`String!`。通常建议使用`Bytes!`,除非 `id` 包含人类可读的文本,因为有`Bytes!` id的试题比使用`String!` `id`的写入和查询速度会更快!`id` 字段充当主钥,并且需要在同一类型的所有实体中是唯一的。由于历史原因,类型 `ID!`也被接受,是 `String!` 的同义词! - -对于某些实体类型,`id` 是由另外两个实体的 id 构成的; 这可以使用 `concat`,例如,`let id = left t.id.concat (right id)`来从`左边`和`右边`的 id 构成 id。类似地,要从现有实体的 id 和计数器`count`构造 id,可以使用 `id = left t.id.concatI32(count)`。只要`左边`的长度对于所有这样的实体都是相同的,这种串联就一定会产生唯一的 id,例如,因为 `left. id` 是一个 `Address`。 - -### 内置标量类型 - -#### GraphQL 支持的标量 - -我们在 GraphQL API 中支持以下标量: - -| 类型 | 描述 | -| --- | --- | -| `字节` | 字节数组,表示为十六进制字符串。 通常用于以太坊hash和地址。 | -| `字符串` | `string` 值的标量。 不支持空字符,并会自动进行删除。 | -| `Boolean` | `boolean` 值的标量。 | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | 大整数。 用于以太坊的 `uint32`、`int64`、`uint64`、...、`uint256` 类型。 注意:`uint32`以下的所有类型,例如`int32`、`uint24`或`int8`都表示为`i32`。 | -| `BigDecimal` | `BigDecimal` 表示为有效数字和指数的高精度小数。 指数范围是 -6143 到 +6144。 四舍五入到 34 位有效数字。 | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. 
| - -#### 枚举类型 - -您还可以在模式中创建枚举类型。 枚举类型具有以下语法: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -在模式中定义枚举后,您可以使用枚举值的字符串表示形式在实体上设置枚举字段。 例如,您可以将 `tokenStatus` 设置为 `SecondOwner`,方法是首先定义您的实体,然后使用 `entity.tokenStatus = "SecondOwner` 设置字段。 下面的示例演示了带有枚举字段的 Token 实体: - -有关编写枚举的更多详细信息,请参阅 [GraphQL 文档](https://graphql.org/learn/schema/)。 - -#### 实体关系 - -一个实体可能与模式中的一个或多个其他实体发生联系。 您可以在您的查询中遍历这些联系。 Graph 中的联系是单向的。 可以通过在关系的任一“端”上定义单向关系来模拟双向关系。 - -关系是在实体上定义的,就像任何其他字段一样,除了指定的类型是另一个实体类型。 - -#### 一对一关系 - -使用`TransactionReceipt` 实体类型,它与`Transaction` 实体类型具有可选的一对一关系: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! - transaction: Transaction -} -``` - -#### 一对多关系 - -定义一个 `TokenBalance` 实体类型,它与 Token 实体类型具有一对多关系: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### 反向查找 - -可以通过 `@derivedFrom` 字段在实体上定义反向查找。 这会在实体上创建一个虚拟字段,它可以被查询,但不能通过映射 API 手动设置。 相反的,它是从在另一个实体上定义的关系派生的。 对于这样的关系,将关系的两边都存储起来几乎没有意义,并且当只存储一侧而导出另一侧时,索引和查询性能都会更好。 - -对于一对多关系,关系应始终存储在“一”端,而“多”端应始终派生。 以这种方式存储关系,而不是在“多”端存储实体数组,将大大提高索引和查询子图的性能。 通常,应尽可能避免存储实体数组。 - -#### 示例 - -我们可以通过派生 `tokenBalances` 字段,来使代币的余额可以从代币中访问: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### 多对多关系 - -对于多对多关系,例如每个可能属于任意数量的组织的用户,对关系建模的最直接,但通常不是最高效的方法,是在所涉及的两个实体中的每一个中定义数组。 如果关系是对称的,则只需要存储关系的一侧联系,就可以导出另一侧。 - -#### 示例 - -定义从 `User` 实体类型到 `Organization` 实体类型的反向查找。 在下面的示例中,这是通过从 `Organization` 实体中查找 `members` 属性来实现的。 在查询中,`User` 上的 `organizations` 字段将通过查找包含用户 ID 的所有 `Organization` 实体来解析。 - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! 
- name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -存储这种关系的一种更高效的方法是通过一个映射表,其中每个 `User` / `Organization` 对都有一个条目,其模式如下 - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -这种方法要求查询下降一个额外的级别来检索,例如,用户的组织: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -这种存储多对多关系的更精细的方式将导致为子图存储的数据更少,因此子图的索引和查询速度通常会大大加快。 - -#### 向模式添加注释 - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symble `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! -} -``` - -## 定义全文搜索字段 - -全文搜索查询根据文本搜索输入来过滤和排列实体。 通过在与索引文本数据进行比较之前,将查询文本输入处理到词干中,全文查询能够返回相似词的匹配项。 - -全文查询定义包括查询名称、用于处理文本字段的语言词典、用于对结果进行排序的排序算法,以及搜索中包含的字段。 每个全文查询可能跨越多个字段,但所有包含的字段必须来自单个实体类型。 - -要添加全文查询,请在 GraphQL 模式中包含带有全文指令的 `_Schema_` 类型。 - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! 
-} -``` - -示例 `bandSearch` 字段可用于查询,以根据 `name`、`description`、`bio` 字段中的文本文档,来过滤 `Band` 实体。 请跳转到 [GraphQL API - 查询](/querying/graphql-api#queries),了解全文搜索 API 的描述和更多示例用法。 - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[功能管理](#experimental-features):**从 `specVersion` `0.0.4` 及以后,必须在子图清单的 `features` 部分下声明 `fullTextSearch` 。 - -### 支持的语言 - -选择不同的语言将对全文搜索 API 产生明确的(尽管有时是微妙的)影响。 全文查询字段涵盖的字段将会在所选语言的内容中进行检查,因此分析和搜索查询产生的词位因语言而异。 例如:当使用支持的土耳其语词典时,“token”的词干为“toke”,而英语词典当然会认为其词干为“token”。 - -支持的语言词典: - -| 代码 | 词典 | -| ------ | ---------- | -| simple | 通用 | -| da | 丹麦语 | -| nl | 荷兰语 | -| en | 英语 | -| fi | 芬兰语 | -| fr | 法语 | -| de | 德语 | -| hu | 匈牙利语 | -| it | 意大利语 | -| no | 挪威语 | -| pt | 葡萄牙语 | -| ro | 罗马尼亚语 | -| ru | 俄语 | -| es | 西班牙语 | -| sv | 瑞典语 | -| tr | 土耳其语 | - -### 排序算法 - -支持的排序结果算法: - -| 算法 | 描述 | -| ------------- | --------------------------------------------- | -| rank | 使用全文查询的匹配质量 (0-1) 对结果进行排序。 | -| proximityRank | 与 rank 类似,但也包括匹配的接近程度。 | - -## 编写映射 - -映射将获取的以太坊数据转换为您的模式文件中定义的实体。 映射是用 [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) 的子集编写的,称为 [[AssemblyScript]](https://github.com/AssemblyScript/assemblyscript/wiki)。 AssemblyScript 可以编译成 WASM ([WebAssembly](https://webassembly.org/))。 AssemblyScript 比普通的 TypeScript 更严格,但提供了开发者熟悉的语法。 - -对于在 `mapping.eventHandlers` 下的 `subgraph.yaml` 中定义的每个事件处理程序,都会创建一个同名的导出函数。 每个处理程序必须接受一个名为 `event` 的参数,其类型对应于正在处理的事件的名称。 - -在示例子图中,`src/mapping.ts` 包含 `NewGravatar` 和 `UpdatedGravatar` 事件的处理程序: - -```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' - -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function 
handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) - if (gravatar == null) { - gravatar = new Gravatar(id) - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} -``` - -第一个处理程序接受 `NewGravatar` 事件,而且使用 `new Gravatar(event.params.id.toHex())` 创建一个新的 `Gravatar` 实体,使用相应的事件参数填充实体字段。 该实体实例由变量 `gravatar` 表示,id 值为 `event.params.id.toHex()`。 - -第二个处理程序尝试从 Graph 节点存储加载现有的 `Gravatar`。 如果尚不存在,则会按需创建。 然后更新实体以匹配新的事件参数,并使用 `gravatar.save()` 将其保存。 - -### 用于创建新实体的推荐 ID - -It is highly recommended to use `Bytes` as the type for `id` fields, and only use `String` for attributes that truly contain human-readable text, like the name of a token. Below are some recommended `id` values to consider when creating new entities. - -- `transfer.id = event.transaction.hash` - -- `let id = event.transaction.hash.concatI32(event.logIndex.toI32())` - -- For entities that store aggregated data, for e.g, daily trade volumes, the `id` usually contains the day number. Here, using a `Bytes` as the `id` is beneficial. Determining the `id` would look like - -```typescript -let dayID = event.block.timestamp.toI32() / 86400 -let id = Bytes.fromI32(dayID) -``` - -- Convert constant addresses to `Bytes`. - -`const id = Bytes.fromHexString('0xdead...beef')` - -There is a [Graph Typescript Library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) which contains utilities for interacting with the Graph Node store and conveniences for handling smart contract data and entities. It can be imported into `mapping.ts` from `@graphprotocol/graph-ts`. - -### Handling of entities with identical IDs - -When creating and saving a new entity, if an entity with the same ID already exists, the properties of the new entity are always preferred during the merge process. 
This means that the existing entity will be updated with the values from the new entity. - -If a null value is intentionally set for a field in the new entity with the same ID, the existing entity will be updated with the null value. - -If no value is set for a field in the new entity with the same ID, the field will result in null as well. - -## 代码生成 - -为了使与智能合约、事件和实体的代码编写工作变得简单且类型安全,Graph CLI 可以从子图的 GraphQL 模式和数据源中包含的合约 ABI 生成 AssemblyScript 类型。 - -这可以通过以下命令实现 - -```sh -graph codegen [--output-dir ] [] -``` - -但在大多数情况下,子图已经通过 `package.json` 进行了预配置,以允许您简单地运行以下命令之一来实现相同的目的: - -```sh -# Yarn -yarn codegen - -# NPM -npm run codegen -``` - -这将为 `subgrap.yaml` 中提到的 ABI 文件中的每个智能合约生成一个 AssemblyScript 类,允许您将这些合约绑定到映射中的特定地址,并针对正在处理的区块调用只读合约方法。它还将为每个合约事件生成一个类,以便于访问事件参数以及事件源自的区块和交易。所有这些类型都写入到`//.ts`。在示例子图中,这将`generated/Gravity/Gravity.ts`,允许映射导入这些类型。 - -```javascript -import { - // The contract class: - Gravity, - // The events classes: - NewGravatar, - UpdatedGravatar, -} from '../generated/Gravity/Gravity' -``` - -除此之外,还会为子图的 GraphQL 模式中的每个实体类型生成一个类。 这些类提供类型安全的实体加载、对实体字段的读写访问以及一个 `save()` 方法来写入要存储的实体。 所有实体类都写入 `/schema.ts`,允许映射导入它们 - -```javascript -import { Gravatar } from '../generated/schema' -``` - -> **注意:** 每次更改 GraphQL 模式文件或清单中包含的 ABI 后,都必须再次执行代码生成。 在构建或部署子图之前,它还必须至少执行一次。 - -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. 
- -## 数据源模板 - -EVM兼容智能合约中的一种常见模式是使用注册表或工厂合约,其中一个合约创建、管理或引用任意数量的其他合约,每个合约都有自己的状态和事件。 - -这些子合约的地址可能事先知道,也可能不知道,其中许多合约可能会随着时间的推移而创建和/或添加。这就是为什么在这种情况下,定义单个数据源或固定数量的数据源是不可能的,需要一种更动态的方法:_数据源模板_。 - -### 主合约的数据源 - -首先,您需要为主合约定义一个常规数据源。 下面的代码片段显示了 [Uniswap](https://uniswap.org) 交换工厂合约的简化示例数据源。 注意 `NewExchange(address,address)` 事件处理程序。 当工厂合约在链上创建新交换合约时,会发出此消息。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### 动态创建合约的数据源模板 - -然后,将 _数据源模板_ 添加到清单中。 它们与常规数据源相同,只是在 `source` 下缺少预先定义的合约地址。 通常,您需要为母合约管理或引用的每种类型的子合约定义一个模板。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... 
-templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### 实例化数据源模板 - -在最后一步中,您可以更新主合约映射,以便从其中一个模板创建动态数据源实例。 在此示例中,您将更改主合约映射以导入 `Exchange` 模板,并在其上调用 `Exchange.create(address)` 方法,从而开始索引新交换合约。 - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **注意:** 新的数据源只会处理创建它的区块和所有后续区块的调用和事件,而不会处理历史数据,也就是包含在先前区块中的数据。 -> -> 如果先前的区块包含与新数据源相关的数据,最好通过读取合约的当前状态,并在创建新数据源时创建表示该状态的实体来索引该数据。 - -### 数据源背景 - -数据源背景允许在实例化模板时传递额外的配置。 在我们的示例中,假设交易所与特定的交易对相关联,该交易对包含在 `NewExchange` 事件中。 该信息可以传递到实例化的数据源中,如下所示: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -在 `Exchange` 模板的映射中,可以访问背景: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -对于所有的值类型,都有像 `setString` 和 `getString` 这样的 setter 和 getter。 - -## 起始区块 - -`startBlock` 是一个可选配置,允许您定义数据源从区块链中的哪个区块开始索引。 设置起始区块允许数据源跳过潜在的数百万个不相关的区块。 通常,子图开发人员会将 
`startBlock` 设置为创建数据源智能合约的区块。 - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **注意:** 合约创建区块可以在 Etherscan 上快速查找: -> -> 1. 通过在搜索栏中输入合约地址来搜索合约。 -> 2. 单击 `Contract Creator` 部分中的创建交易hash。 -> 3. 加载交易详情页面,您将在其中找到该合约的起始区块。 - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. 
- -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. - -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` - -You can check the earliest block (with historical state) for a given subgraph by querying the [Indexing Status API](/deploying/deploying-a-subgraph-to-hosted/#checking-subgraph-health): - -``` -{ - indexingStatuses(subgraphs: ["Qm..."]) { - subgraph - synced - health - chains { - earliestBlock { - number - } - latestBlock { - number - } - chainHeadBlock { number } - } - } -} -``` - -Note that the `earliestBlock` is the earliest block with historical data, which will be more recent than the `startBlock` specified in the manifest, if the subgraph has been pruned. 
- -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. 
-``` - -## 调用处理程序 - -虽然事件提供了一种收集合约状态相关变换的有效方法,但许多合约避免生成日志以优化 gas 成本。 在这些情况下,子图可以订阅对数据源合约的调用。 这是通过定义引用函数签名的调用处理程序,及处理对该函数调用的映射处理程序来实现的。 为了处理这些调用,映射处理程序将接收一个 `ethereum.Call` 作为参数,其中包含调用的类型化输入和输出。 在交易调用链中的任何深度进行的调用都会触发映射,从而捕获通过代理合约与数据源合约的交互活动。 - -调用处理程序只会在以下两种情况之一触发:当指定的函数被合约本身以外的账户调用时,或者当它在 Solidity 中被标记为外部,并作为同一合约中另一个函数的一部分被调用时。 - -> **注意:** 调用处理程序目前依赖于 Parity 跟踪 API。某些网络,如 BNB 链和 Arbitrum,不支持此 API。如果索引其中一个网络的子图包含一个或多个调用处理程序,它将不会开始同步。子图开发人员应该使用事件处理程序。它们比调用处理程序性能好得多,并且在每个 evm 网络上都受到支持。 - -### 定义调用处理程序 - -要在清单中定义调用处理程序,只需在您要订阅的数据源下添加一个 `callHandlers` 数组。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -`function` 是用于过滤调用的规范化函数签名。 `handler` 属性是映射中您希望在数据源合约中调用目标函数时执行的函数名称。 - -### 映射函数 - -每个调用处理程序都有一个参数,该参数的类型对应于被调用函数的名称。 在上面的示例子图中,映射包含一个处理程序,用于调用 `createGravatar` 函数并接收 `CreateGravatarCall` 参数作为参数: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -`handleCreateGravatar` 函数接受一个新的 `CreateGravatarCall`,它是 `@graphprotocol/graph-ts`提供的`ethereum.Call` 的子类,包括调用的输入和输出。 `CreateGravatarCall` 类型是在您运行 `graph codegen` 时为您生成的。 - -## 区块处理程序 - -除了订阅合约事件或函数调用之外,子图可能还希望在将新区块附加到链上时更新其数据。 为了实现这一点,子图可以在每个区块之后,或匹配预定义过滤器的区块之后,运行一个函数。 - -### 支持的过滤器 - -#### 调用筛选器 - -```yaml -filter: - kind: call -``` - 
-_对于每个包含对定义处理程序的合约(数据源)调用的区块,相应的处理程序都会被调用一次。_ - -> **注意:** `调用`处理程序目前依赖于 Parity 跟踪 API。某些网络,如 BNB 链和 Arbitrum,不支持此 API。如果索引其中一个网络的子图包含一个或多个带过滤器的区块`调用`处理程序,它将不会开始同步。 - -块处理程序没有过滤器将确保每个块都调用处理程序。对于每种过滤器类型,一个数据源只能包含一个块处理程序。 - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### 投票筛选器 - -> **Requires `specVersion` >= 0.0.8** - -> **注意:** 投票筛选器仅适用于`kind: ethereum`的数据源。 - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -所定义的处理程序将在每`n`个块上被调用一次,其中`n`的值由`every`字段提供。这种配置允许子图以固定的区块间隔执行特定的操作。 - -#### 一次性筛选器 - -> **Requires `specVersion` >= 0.0.8** - -> **注意:** 一次性筛选器仅适用于`kind: ethereum`的数据源。 - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -带有 "once filter" 的所定义处理程序将在所有其他处理程序运行之前仅被调用一次。这种配置允许子图将该处理程序用作初始化处理程序,在索引开始时执行特定任务。 - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### 映射函数 - -映射函数将接收 `ethereum.Block` 作为其唯一参数。 与事件的映射函数一样,此函数可以访问存储中现有的子图实体、调用智能合约、以及创建或更新实体。 - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## 匿名事件 - -如果您需要在 Solidity 中处理匿名事件,可以通过提供事件的主题 0 来实现,如示例所示: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -只有当签名和主题 0 都匹配时才会触发事件。 默认情况下,`topic0` 
等于事件签名的hash值。 - -## 事件处理程序中的交易接收 - -从 `specVersion`` 0.0.5`和 `apiVersion`` 0.0.7`开始,事件处理程序可以访问发出它们的交易接收。 - -要做到这一点,事件处理程序必须在子图清单中用新的`receipt: true` 密钥声明,该密钥是可选的,默认为 false。 - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -在处理程序函数内部,可以在 `Event.Receipt`字段中访问收据。当`接收`密钥设置为 `false` 或在清单中省略时,将返回`空`值。 - -## 实验性特征 - -从 `specVersion` `0.0.4` 开始,子图特征必须使用它们的 `camelCase` 名称,在清单文件顶层的 `features` 部分中显式声明,如下表所列: - -| 特征 | 名称 | -| ----------------------------- | ---------------- | -| [非致命错误](#非致命错误) | `nonFatalErrors` | -| [全文搜索](#定义全文搜索字段) | `fullTextSearch` | -| [嫁接](#嫁接到现有子图) | `grafting` | - -例如,如果子图使用 **Full-Text Search** 和 **Non-fatal Errors** 功能,则清单中的 `features` 字段应为: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... -``` - -请注意,在子图部署期间使用未声明的特性会导致**验证错误**,但如果声明了特性未使用,则不会出现错误。 - -### Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -#### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. 
Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. - -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. 
- -### 非致命错误 - -在默认情况下,已同步子图上的索引错误会导致子图失败并停止同步。 子图也可以配置为忽略引发错误的处理程序所做的更改, 在出现错误时继续同步。 这使子图作者有时间更正他们的子图,同时继续针对最新区块提供查询,尽管由于导致错误的代码问题,结果可能会不一致。 请注意,某些错误仍然总是致命的,要成为非致命错误,首先需要确定相应的错误是确定性的错误。 - -> **注意:** Graph 网络尚不支持非致命错误,开发人员不应通过工作室将使用该功能的子图部署到网络。 - -启用非致命错误需要在子图清单上设置以下功能标志: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -查询还必须通过 `subgraphError` 参数选择查询可能存在不一致的数据。 还建议查询 `_meta` 以检查子图是否跳过错误,如示例: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -如果子图遇到错误,则查询将返回数据和带有消息 `"indexing_error"` 的 graphql 错误,如以下示例响应所示: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -### 嫁接到现有子图 - -> **注意:** 在初次升级到The Graph Network时,不建议使用grafting。可以在[这里](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network)了解更多信息。 - -首次部署子图时,它会在相应链的启动区块(或每个数据源定义的 `startBlock` 处)开始索引事件。在某些情况下,可以使用现有子图已经索引的数据并在更晚的区块上开始索引。 这种索引模式称为*Grafting*。 例如,嫁接在开发过程中非常有用,可以快速克服映射中的简单错误,或者在现有子图失败后暂时恢复工作。 - -当 `subgraph.yaml` 中的子图清单在顶层包含 `graft` 区块时,子图被嫁接到基础子图: - -```yaml -description: ... -graft: - base: Qm... 
# Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -当部署其清单包含 `graft` 区块的子图时,Graph 节点将复制 `base` 子图的数据,直到并包括给定的 `区块`,然后继续从该区块开始索引新子图。 基础子图必须存在于目标图节点实例上,并且必须至少索引到给定区块。 由于这个限制,嫁接只能在开发期间或紧急情况下使用,以加快生成等效的非嫁接子图。 - -因为嫁接是拷贝而不是索引基础数据,所以子图同步到所需区块比从头开始索引要快得多,尽管对于非常大的子图,初始数据拷贝仍可能需要几个小时。 在初始化嫁接子图时,Graph 节点将记录有关已复制的实体类型的信息。 - -嫁接子图可以使用一个GraphQL模式,该模式与某个基本子图不同,但仅与基本子图兼容。它本身必须是一个有效的子图模式,但是可以通过以下方式偏离基本子图的模式: - -- 它添加或删除实体类型 -- 它从实体类型中删除属性 -- 它将可为空的属性添加到实体类型 -- 它将不可为空的属性转换为可空的属性 -- 它将值添加到枚举类型中 -- 它添加或删除接口 -- 它改变了实现接口的实体类型 - -> **[特征管理](#experimental-features):** `grafting`必须在子图清单中的`features`下声明。 - -## IPFS/Arweave File Data Sources - -文件数据源是一种新的子图功能,用于以稳健、可扩展的方式在索引期间访问链下数据。文件数据源支持从IPFS和Arweave获取文件。 - -> 这也为链外数据的确定性索引以及引入任意HTTP源数据奠定了基础。 - -### 概述 - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> 这将替换现有的`ipfs.cat` API - -### 升级指南 - -#### 更新`graph-ts`和`graph-cli` - -文件数据源需要graph-ts>=0.29.0和graph-cli>=0.33.1 - -#### 添加新的实体类型,当找到文件时将更新该类型 - -文件数据源不能访问或更新基于链的实体,但必须更新特定于文件的实体。 - -这可能意味着将现有实体中的字段拆分为单独的实体,并链接在一起。 - -原始合并实体: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -新拆分实体: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! 
-} -``` - -如果母实体与生成的文件数据源实体之间的关系为1:1,则最简单的模式是通过使用IPFS CID作为查找将母实体链接到生成的文件实体。如果您在建模新的基于文件的实体时遇到困难,请联系Discord! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### 添加一个新的模板数据源,使用`kind: file/ipfs`或`kind: file/arweave`。 - -这是在识别出感兴趣的文件时生成的数据源。 - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> 目前需要`abis`,但无法从文件数据源中调用合同 - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### 创建新处理程序以处理文件 - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
文件的CID作为可读字符串,可以通过 `dataSource` 访问,如下所示:
The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -这将创建一个新的文件数据源,该数据源将轮询Graph Node配置的IPFS或Arweave端点,如果未找到文件,则进行重试。当找到文件时,文件数据源处理程序将被执行。 - -此示例使用 CID 作为母 `Token` 实体和生成的 `TokenMetadata` 实体之间的查找。 - -> 以前,子图开发人员会在此时调用 `ipfs.cat (CID)`来获取文件。 - -祝贺您,您正在使用文件数据源! - -#### 将你的子图部署 - -现在,您可以将子图`构建`并`部署`到任何Graph Node>=v0.30.0-rc.0。 - -#### 限制 - -文件数据源处理程序和实体与其他子图实体隔离,确保它们在执行时是确定的,并确保基于链的数据源不受污染。具体来说: - -- 文件数据源创建的实体是不可变的,不能更新 -- 文件数据源处理程序无法访问其他文件数据源中的实体 -- 基于链的处理程序无法访问与文件数据源关联的实体 - -> 虽然这个约束对于大多数用例不应该是有问题的,但是对于某些用例,它可能会引入复杂性。如果您在子图中基于文件数据建模时遇到问题,请通过 Discord 与我们联系! - -此外,不可能从文件数据源创建数据源,无论是线上数据源还是其他文件数据源。这项限制将来可能会取消。 - -#### 最佳实践 - -如果要将 NFT 元数据链接到相应的代币,请使用元数据的 IPFS hash从代币实体引用元数据实体。使用 IPFS hash作为 ID 保存元数据实体。 - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. 
如果您有多次刷新的实体,请使用 IPFS hash 和实体 ID 创建唯一的基于文件的实体,并使用基于链的实体中的派生字段引用它们
Visit the contract address for Arbitrum One subgraphs [here](https://arbiscan.io/address/0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec#writeProxyContract). -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Your subgraph will no longer appear in searches on Graph Explorer. - -**Please note the following:** - -- The owner's wallet should call the `deprecateSubgraph` function. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph can withdraw their signal at an average share price. -- Deprecated subgraphs will show an error message. - -> If you interacted with the deprecated subgraph, you can find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/zh/mips-faqs.mdx b/website/pages/zh/mips-faqs.mdx deleted file mode 100644 index b99b7aabbfd2..000000000000 --- a/website/pages/zh/mips-faqs.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: MIP常见问题解答 ---- - -## 介绍 - -> 注意:自2023年5月起,MIPs项目已关闭。感谢所有参与的索引人! - -这是一个可以参与Graph生态系统,激动人心的时刻!2022年[Graph日]期间(https://thegraph.com/graph-day/2022/)Yaniv Tal宣布[即将结束托管服务](https://thegraph.com/blog/sunsetting-hosted-service/),这是Graph生态系统多年来一直致力于的一刻。 - -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). - -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. - -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. 
- -### Useful Resources - -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) - -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? - -Yes, it is indeed. - -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. - -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). - -### 2. Which chain will the MIPs program incentivise first? - -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. - -### 3. How will new chains be added to the MIPs program? - -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. 
MIPs participants will be scored based on their performance, ability to serve network needs, and community support. - -### 4. How will we know when the network is ready for a new chain? - -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. - -### 5. How are rewards divided per chain? - -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. - -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? - -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. - -### 7. When will rewards be distributed? - -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. - -### 8. How does scoring work? - -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: - -**Subgraph Coverage** - -- Are you providing maximal support for subgraphs per chain? 
- -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. - -**Quality Of Service** - -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? - -- Is the Indexer supporting dapp developers being reactive to their needs? - -Is Indexer allocating efficiently, contributing to the overall health of the network? - -**Community Support** - -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? - -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? - -### 9. How will the Discord role be assigned? - -Moderators will assign the roles in the next few days. - -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? - -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. - -### 11. At what point do you expect participants to add a mainnet deployment? - -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) - -### 12. Will rewards be subject to vesting? - -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. - -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? - -Yes - -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? - -Yes - -### 15. During the MIPs program, will there be a period to dispute invalid POI? - -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation - -### 17. 
Can we combine two vesting contracts? - -No. The options are: you can delegate one to the other one or run two separate indexers. - -### 18. KYC Questions? - -Please email info@thegraph.foundation - -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? - -Yes - -### 20. Are there recommended regions to run the servers? - -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. - -### 21. What is “handler gas cost”? - -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/zh/network/_meta.js b/website/pages/zh/network/_meta.js index caa8413f6703..49858537c885 100644 --- a/website/pages/zh/network/_meta.js +++ b/website/pages/zh/network/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/network/_meta.js' export default { ...meta, - overview: '概述', } diff --git a/website/pages/zh/querying/_meta.js b/website/pages/zh/querying/_meta.js index 5903eca7ce9a..e52da8f399fb 100644 --- a/website/pages/zh/querying/_meta.js +++ b/website/pages/zh/querying/_meta.js @@ -2,5 +2,4 @@ import meta from '../../en/querying/_meta.js' export default { ...meta, - 'graph-client': undefined, // Remove from sidebar, defined only for `en` language } diff --git a/website/pages/zh/querying/graph-client/_meta.js b/website/pages/zh/querying/graph-client/_meta.js new file mode 100644 index 000000000000..f00c8556ac1b --- /dev/null +++ b/website/pages/zh/querying/graph-client/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/querying/graph-client/_meta.js' + +export default { + ...meta, +} diff --git a/website/route-lockfile.txt b/website/route-lockfile.txt index 3632901afe11..3ba83900fc2c 100644 --- a/website/route-lockfile.txt +++ b/website/route-lockfile.txt @@ -24,12 +24,8 @@ /ar/cookbook/timeseries/ 
/ar/cookbook/transfer-to-the-graph/ /ar/deploying/deploy-using-subgraph-studio/ -/ar/deploying/deploying-a-subgraph-to-hosted/ -/ar/deploying/hosted-service/ /ar/deploying/multiple-networks/ /ar/deploying/subgraph-studio-faqs/ -/ar/deploying/subgraph-studio/ -/ar/developing/creating-a-subgraph/ /ar/developing/creating-a-subgraph/advanced/ /ar/developing/creating-a-subgraph/assemblyscript-mappings/ /ar/developing/creating-a-subgraph/install-the-cli/ @@ -46,9 +42,7 @@ /ar/glossary/ /ar/graphcast/ /ar/managing/delete-a-subgraph/ -/ar/managing/deprecate-a-subgraph/ /ar/managing/transfer-a-subgraph/ -/ar/mips-faqs/ /ar/network/benefits/ /ar/network/contracts/ /ar/network/curating/ @@ -105,13 +99,8 @@ /cs/cookbook/timeseries/ /cs/cookbook/transfer-to-the-graph/ /cs/deploying/deploy-using-subgraph-studio/ -/cs/deploying/deploying-a-subgraph-to-hosted/ -/cs/deploying/deploying-a-subgraph-to-studio/ -/cs/deploying/hosted-service/ /cs/deploying/multiple-networks/ /cs/deploying/subgraph-studio-faqs/ -/cs/deploying/subgraph-studio/ -/cs/developing/creating-a-subgraph/ /cs/developing/creating-a-subgraph/advanced/ /cs/developing/creating-a-subgraph/assemblyscript-mappings/ /cs/developing/creating-a-subgraph/install-the-cli/ @@ -128,9 +117,7 @@ /cs/glossary/ /cs/graphcast/ /cs/managing/delete-a-subgraph/ -/cs/managing/deprecate-a-subgraph/ /cs/managing/transfer-a-subgraph/ -/cs/mips-faqs/ /cs/network/benefits/ /cs/network/contracts/ /cs/network/curating/ @@ -185,13 +172,8 @@ /de/cookbook/timeseries/ /de/cookbook/transfer-to-the-graph/ /de/deploying/deploy-using-subgraph-studio/ -/de/deploying/deploying-a-subgraph-to-hosted/ -/de/deploying/deploying-a-subgraph-to-studio/ -/de/deploying/hosted-service/ /de/deploying/multiple-networks/ /de/deploying/subgraph-studio-faqs/ -/de/deploying/subgraph-studio/ -/de/developing/creating-a-subgraph/ /de/developing/creating-a-subgraph/advanced/ /de/developing/creating-a-subgraph/assemblyscript-mappings/ 
/de/developing/creating-a-subgraph/install-the-cli/ @@ -208,9 +190,7 @@ /de/glossary/ /de/graphcast/ /de/managing/delete-a-subgraph/ -/de/managing/deprecate-a-subgraph/ /de/managing/transfer-a-subgraph/ -/de/mips-faqs/ /de/network/benefits/ /de/network/contracts/ /de/network/curating/ @@ -347,13 +327,8 @@ /es/cookbook/timeseries/ /es/cookbook/transfer-to-the-graph/ /es/deploying/deploy-using-subgraph-studio/ -/es/deploying/deploying-a-subgraph-to-hosted/ -/es/deploying/deploying-a-subgraph-to-studio/ -/es/deploying/hosted-service/ /es/deploying/multiple-networks/ /es/deploying/subgraph-studio-faqs/ -/es/deploying/subgraph-studio/ -/es/developing/creating-a-subgraph/ /es/developing/creating-a-subgraph/advanced/ /es/developing/creating-a-subgraph/assemblyscript-mappings/ /es/developing/creating-a-subgraph/install-the-cli/ @@ -370,9 +345,7 @@ /es/glossary/ /es/graphcast/ /es/managing/delete-a-subgraph/ -/es/managing/deprecate-a-subgraph/ /es/managing/transfer-a-subgraph/ -/es/mips-faqs/ /es/network/benefits/ /es/network/contracts/ /es/network/curating/ @@ -429,13 +402,8 @@ /fr/cookbook/timeseries/ /fr/cookbook/transfer-to-the-graph/ /fr/deploying/deploy-using-subgraph-studio/ -/fr/deploying/deploying-a-subgraph-to-hosted/ -/fr/deploying/deploying-a-subgraph-to-studio/ -/fr/deploying/hosted-service/ /fr/deploying/multiple-networks/ /fr/deploying/subgraph-studio-faqs/ -/fr/deploying/subgraph-studio/ -/fr/developing/creating-a-subgraph/ /fr/developing/creating-a-subgraph/advanced/ /fr/developing/creating-a-subgraph/assemblyscript-mappings/ /fr/developing/creating-a-subgraph/install-the-cli/ @@ -452,9 +420,7 @@ /fr/glossary/ /fr/graphcast/ /fr/managing/delete-a-subgraph/ -/fr/managing/deprecate-a-subgraph/ /fr/managing/transfer-a-subgraph/ -/fr/mips-faqs/ /fr/network/benefits/ /fr/network/contracts/ /fr/network/curating/ @@ -493,24 +459,30 @@ /ha/billing/ /ha/chain-integration-overview/ /ha/cookbook/arweave/ +/ha/cookbook/avoid-eth-calls/ /ha/cookbook/cosmos/ 
+/ha/cookbook/derivedfrom/ /ha/cookbook/enums/ /ha/cookbook/grafting-hotfix/ /ha/cookbook/grafting/ +/ha/cookbook/how-to-secure-api-keys-using-nextjs-server-components/ +/ha/cookbook/immutable-entities-bytes-as-ids/ /ha/cookbook/near/ +/ha/cookbook/pruning/ /ha/cookbook/subgraph-debug-forking/ /ha/cookbook/subgraph-uncrashable/ /ha/cookbook/substreams-powered-subgraphs/ /ha/cookbook/timeseries/ /ha/cookbook/transfer-to-the-graph/ /ha/deploying/deploy-using-subgraph-studio/ -/ha/deploying/deploying-a-subgraph-to-hosted/ -/ha/deploying/deploying-a-subgraph-to-studio/ -/ha/deploying/hosted-service/ /ha/deploying/multiple-networks/ /ha/deploying/subgraph-studio-faqs/ -/ha/deploying/subgraph-studio/ -/ha/developing/creating-a-subgraph/ +/ha/developing/creating-a-subgraph/advanced/ +/ha/developing/creating-a-subgraph/assemblyscript-mappings/ +/ha/developing/creating-a-subgraph/install-the-cli/ +/ha/developing/creating-a-subgraph/ql-schema/ +/ha/developing/creating-a-subgraph/starting-your-subgraph/ +/ha/developing/creating-a-subgraph/subgraph-manifest/ /ha/developing/developer-faqs/ /ha/developing/graph-ts/api/ /ha/developing/graph-ts/common-issues/ @@ -521,10 +493,9 @@ /ha/glossary/ /ha/graphcast/ /ha/managing/delete-a-subgraph/ -/ha/managing/deprecate-a-subgraph/ /ha/managing/transfer-a-subgraph/ -/ha/mips-faqs/ /ha/network/benefits/ +/ha/network/contracts/ /ha/network/curating/ /ha/network/delegating/ /ha/network/developing/ @@ -579,13 +550,8 @@ /hi/cookbook/timeseries/ /hi/cookbook/transfer-to-the-graph/ /hi/deploying/deploy-using-subgraph-studio/ -/hi/deploying/deploying-a-subgraph-to-hosted/ -/hi/deploying/deploying-a-subgraph-to-studio/ -/hi/deploying/hosted-service/ /hi/deploying/multiple-networks/ /hi/deploying/subgraph-studio-faqs/ -/hi/deploying/subgraph-studio/ -/hi/developing/creating-a-subgraph/ /hi/developing/creating-a-subgraph/advanced/ /hi/developing/creating-a-subgraph/assemblyscript-mappings/ /hi/developing/creating-a-subgraph/install-the-cli/ @@ 
-602,9 +568,7 @@ /hi/glossary/ /hi/graphcast/ /hi/managing/delete-a-subgraph/ -/hi/managing/deprecate-a-subgraph/ /hi/managing/transfer-a-subgraph/ -/hi/mips-faqs/ /hi/network/benefits/ /hi/network/contracts/ /hi/network/curating/ @@ -661,13 +625,8 @@ /it/cookbook/timeseries/ /it/cookbook/transfer-to-the-graph/ /it/deploying/deploy-using-subgraph-studio/ -/it/deploying/deploying-a-subgraph-to-hosted/ -/it/deploying/deploying-a-subgraph-to-studio/ -/it/deploying/hosted-service/ /it/deploying/multiple-networks/ /it/deploying/subgraph-studio-faqs/ -/it/deploying/subgraph-studio/ -/it/developing/creating-a-subgraph/ /it/developing/creating-a-subgraph/advanced/ /it/developing/creating-a-subgraph/assemblyscript-mappings/ /it/developing/creating-a-subgraph/install-the-cli/ @@ -684,9 +643,7 @@ /it/glossary/ /it/graphcast/ /it/managing/delete-a-subgraph/ -/it/managing/deprecate-a-subgraph/ /it/managing/transfer-a-subgraph/ -/it/mips-faqs/ /it/network/benefits/ /it/network/contracts/ /it/network/curating/ @@ -743,13 +700,8 @@ /ja/cookbook/timeseries/ /ja/cookbook/transfer-to-the-graph/ /ja/deploying/deploy-using-subgraph-studio/ -/ja/deploying/deploying-a-subgraph-to-hosted/ -/ja/deploying/deploying-a-subgraph-to-studio/ -/ja/deploying/hosted-service/ /ja/deploying/multiple-networks/ /ja/deploying/subgraph-studio-faqs/ -/ja/deploying/subgraph-studio/ -/ja/developing/creating-a-subgraph/ /ja/developing/creating-a-subgraph/advanced/ /ja/developing/creating-a-subgraph/assemblyscript-mappings/ /ja/developing/creating-a-subgraph/install-the-cli/ @@ -766,9 +718,7 @@ /ja/glossary/ /ja/graphcast/ /ja/managing/delete-a-subgraph/ -/ja/managing/deprecate-a-subgraph/ /ja/managing/transfer-a-subgraph/ -/ja/mips-faqs/ /ja/network/benefits/ /ja/network/contracts/ /ja/network/curating/ @@ -823,13 +773,8 @@ /ko/cookbook/timeseries/ /ko/cookbook/transfer-to-the-graph/ /ko/deploying/deploy-using-subgraph-studio/ -/ko/deploying/deploying-a-subgraph-to-hosted/ 
-/ko/deploying/deploying-a-subgraph-to-studio/ -/ko/deploying/hosted-service/ /ko/deploying/multiple-networks/ /ko/deploying/subgraph-studio-faqs/ -/ko/deploying/subgraph-studio/ -/ko/developing/creating-a-subgraph/ /ko/developing/creating-a-subgraph/advanced/ /ko/developing/creating-a-subgraph/assemblyscript-mappings/ /ko/developing/creating-a-subgraph/install-the-cli/ @@ -846,9 +791,7 @@ /ko/glossary/ /ko/graphcast/ /ko/managing/delete-a-subgraph/ -/ko/managing/deprecate-a-subgraph/ /ko/managing/transfer-a-subgraph/ -/ko/mips-faqs/ /ko/network/benefits/ /ko/network/contracts/ /ko/network/curating/ @@ -905,13 +848,8 @@ /mr/cookbook/timeseries/ /mr/cookbook/transfer-to-the-graph/ /mr/deploying/deploy-using-subgraph-studio/ -/mr/deploying/deploying-a-subgraph-to-hosted/ -/mr/deploying/deploying-a-subgraph-to-studio/ -/mr/deploying/hosted-service/ /mr/deploying/multiple-networks/ /mr/deploying/subgraph-studio-faqs/ -/mr/deploying/subgraph-studio/ -/mr/developing/creating-a-subgraph/ /mr/developing/creating-a-subgraph/advanced/ /mr/developing/creating-a-subgraph/assemblyscript-mappings/ /mr/developing/creating-a-subgraph/install-the-cli/ @@ -928,9 +866,7 @@ /mr/glossary/ /mr/graphcast/ /mr/managing/delete-a-subgraph/ -/mr/managing/deprecate-a-subgraph/ /mr/managing/transfer-a-subgraph/ -/mr/mips-faqs/ /mr/network/benefits/ /mr/network/contracts/ /mr/network/curating/ @@ -985,13 +921,8 @@ /nl/cookbook/timeseries/ /nl/cookbook/transfer-to-the-graph/ /nl/deploying/deploy-using-subgraph-studio/ -/nl/deploying/deploying-a-subgraph-to-hosted/ -/nl/deploying/deploying-a-subgraph-to-studio/ -/nl/deploying/hosted-service/ /nl/deploying/multiple-networks/ /nl/deploying/subgraph-studio-faqs/ -/nl/deploying/subgraph-studio/ -/nl/developing/creating-a-subgraph/ /nl/developing/creating-a-subgraph/advanced/ /nl/developing/creating-a-subgraph/assemblyscript-mappings/ /nl/developing/creating-a-subgraph/install-the-cli/ @@ -1008,9 +939,7 @@ /nl/glossary/ /nl/graphcast/ 
/nl/managing/delete-a-subgraph/ -/nl/managing/deprecate-a-subgraph/ /nl/managing/transfer-a-subgraph/ -/nl/mips-faqs/ /nl/network/benefits/ /nl/network/contracts/ /nl/network/curating/ @@ -1065,13 +994,8 @@ /pl/cookbook/timeseries/ /pl/cookbook/transfer-to-the-graph/ /pl/deploying/deploy-using-subgraph-studio/ -/pl/deploying/deploying-a-subgraph-to-hosted/ -/pl/deploying/deploying-a-subgraph-to-studio/ -/pl/deploying/hosted-service/ /pl/deploying/multiple-networks/ /pl/deploying/subgraph-studio-faqs/ -/pl/deploying/subgraph-studio/ -/pl/developing/creating-a-subgraph/ /pl/developing/creating-a-subgraph/advanced/ /pl/developing/creating-a-subgraph/assemblyscript-mappings/ /pl/developing/creating-a-subgraph/install-the-cli/ @@ -1088,9 +1012,7 @@ /pl/glossary/ /pl/graphcast/ /pl/managing/delete-a-subgraph/ -/pl/managing/deprecate-a-subgraph/ /pl/managing/transfer-a-subgraph/ -/pl/mips-faqs/ /pl/network/benefits/ /pl/network/contracts/ /pl/network/curating/ @@ -1147,13 +1069,8 @@ /pt/cookbook/timeseries/ /pt/cookbook/transfer-to-the-graph/ /pt/deploying/deploy-using-subgraph-studio/ -/pt/deploying/deploying-a-subgraph-to-hosted/ -/pt/deploying/deploying-a-subgraph-to-studio/ -/pt/deploying/hosted-service/ /pt/deploying/multiple-networks/ /pt/deploying/subgraph-studio-faqs/ -/pt/deploying/subgraph-studio/ -/pt/developing/creating-a-subgraph/ /pt/developing/creating-a-subgraph/advanced/ /pt/developing/creating-a-subgraph/assemblyscript-mappings/ /pt/developing/creating-a-subgraph/install-the-cli/ @@ -1170,9 +1087,7 @@ /pt/glossary/ /pt/graphcast/ /pt/managing/delete-a-subgraph/ -/pt/managing/deprecate-a-subgraph/ /pt/managing/transfer-a-subgraph/ -/pt/mips-faqs/ /pt/network/benefits/ /pt/network/contracts/ /pt/network/curating/ @@ -1227,13 +1142,8 @@ /ro/cookbook/timeseries/ /ro/cookbook/transfer-to-the-graph/ /ro/deploying/deploy-using-subgraph-studio/ -/ro/deploying/deploying-a-subgraph-to-hosted/ -/ro/deploying/deploying-a-subgraph-to-studio/ 
-/ro/deploying/hosted-service/ /ro/deploying/multiple-networks/ /ro/deploying/subgraph-studio-faqs/ -/ro/deploying/subgraph-studio/ -/ro/developing/creating-a-subgraph/ /ro/developing/creating-a-subgraph/advanced/ /ro/developing/creating-a-subgraph/assemblyscript-mappings/ /ro/developing/creating-a-subgraph/install-the-cli/ @@ -1250,9 +1160,7 @@ /ro/glossary/ /ro/graphcast/ /ro/managing/delete-a-subgraph/ -/ro/managing/deprecate-a-subgraph/ /ro/managing/transfer-a-subgraph/ -/ro/mips-faqs/ /ro/network/benefits/ /ro/network/contracts/ /ro/network/curating/ @@ -1309,13 +1217,8 @@ /ru/cookbook/timeseries/ /ru/cookbook/transfer-to-the-graph/ /ru/deploying/deploy-using-subgraph-studio/ -/ru/deploying/deploying-a-subgraph-to-hosted/ -/ru/deploying/deploying-a-subgraph-to-studio/ -/ru/deploying/hosted-service/ /ru/deploying/multiple-networks/ /ru/deploying/subgraph-studio-faqs/ -/ru/deploying/subgraph-studio/ -/ru/developing/creating-a-subgraph/ /ru/developing/creating-a-subgraph/advanced/ /ru/developing/creating-a-subgraph/assemblyscript-mappings/ /ru/developing/creating-a-subgraph/install-the-cli/ @@ -1332,9 +1235,7 @@ /ru/glossary/ /ru/graphcast/ /ru/managing/delete-a-subgraph/ -/ru/managing/deprecate-a-subgraph/ /ru/managing/transfer-a-subgraph/ -/ru/mips-faqs/ /ru/network/benefits/ /ru/network/contracts/ /ru/network/curating/ @@ -1391,13 +1292,8 @@ /sv/cookbook/timeseries/ /sv/cookbook/transfer-to-the-graph/ /sv/deploying/deploy-using-subgraph-studio/ -/sv/deploying/deploying-a-subgraph-to-hosted/ -/sv/deploying/deploying-a-subgraph-to-studio/ -/sv/deploying/hosted-service/ /sv/deploying/multiple-networks/ /sv/deploying/subgraph-studio-faqs/ -/sv/deploying/subgraph-studio/ -/sv/developing/creating-a-subgraph/ /sv/developing/creating-a-subgraph/advanced/ /sv/developing/creating-a-subgraph/assemblyscript-mappings/ /sv/developing/creating-a-subgraph/install-the-cli/ @@ -1414,9 +1310,7 @@ /sv/glossary/ /sv/graphcast/ /sv/managing/delete-a-subgraph/ 
-/sv/managing/deprecate-a-subgraph/ /sv/managing/transfer-a-subgraph/ -/sv/mips-faqs/ /sv/network/benefits/ /sv/network/contracts/ /sv/network/curating/ @@ -1546,13 +1440,8 @@ /tr/cookbook/timeseries/ /tr/cookbook/transfer-to-the-graph/ /tr/deploying/deploy-using-subgraph-studio/ -/tr/deploying/deploying-a-subgraph-to-hosted/ -/tr/deploying/deploying-a-subgraph-to-studio/ -/tr/deploying/hosted-service/ /tr/deploying/multiple-networks/ /tr/deploying/subgraph-studio-faqs/ -/tr/deploying/subgraph-studio/ -/tr/developing/creating-a-subgraph/ /tr/developing/creating-a-subgraph/advanced/ /tr/developing/creating-a-subgraph/assemblyscript-mappings/ /tr/developing/creating-a-subgraph/install-the-cli/ @@ -1569,9 +1458,7 @@ /tr/glossary/ /tr/graphcast/ /tr/managing/delete-a-subgraph/ -/tr/managing/deprecate-a-subgraph/ /tr/managing/transfer-a-subgraph/ -/tr/mips-faqs/ /tr/network/benefits/ /tr/network/contracts/ /tr/network/curating/ @@ -1626,13 +1513,8 @@ /uk/cookbook/timeseries/ /uk/cookbook/transfer-to-the-graph/ /uk/deploying/deploy-using-subgraph-studio/ -/uk/deploying/deploying-a-subgraph-to-hosted/ -/uk/deploying/deploying-a-subgraph-to-studio/ -/uk/deploying/hosted-service/ /uk/deploying/multiple-networks/ /uk/deploying/subgraph-studio-faqs/ -/uk/deploying/subgraph-studio/ -/uk/developing/creating-a-subgraph/ /uk/developing/creating-a-subgraph/advanced/ /uk/developing/creating-a-subgraph/assemblyscript-mappings/ /uk/developing/creating-a-subgraph/install-the-cli/ @@ -1649,9 +1531,7 @@ /uk/glossary/ /uk/graphcast/ /uk/managing/delete-a-subgraph/ -/uk/managing/deprecate-a-subgraph/ /uk/managing/transfer-a-subgraph/ -/uk/mips-faqs/ /uk/network/benefits/ /uk/network/contracts/ /uk/network/curating/ @@ -1708,13 +1588,8 @@ /ur/cookbook/timeseries/ /ur/cookbook/transfer-to-the-graph/ /ur/deploying/deploy-using-subgraph-studio/ -/ur/deploying/deploying-a-subgraph-to-hosted/ -/ur/deploying/deploying-a-subgraph-to-studio/ -/ur/deploying/hosted-service/ 
/ur/deploying/multiple-networks/ /ur/deploying/subgraph-studio-faqs/ -/ur/deploying/subgraph-studio/ -/ur/developing/creating-a-subgraph/ /ur/developing/creating-a-subgraph/advanced/ /ur/developing/creating-a-subgraph/assemblyscript-mappings/ /ur/developing/creating-a-subgraph/install-the-cli/ @@ -1731,9 +1606,7 @@ /ur/glossary/ /ur/graphcast/ /ur/managing/delete-a-subgraph/ -/ur/managing/deprecate-a-subgraph/ /ur/managing/transfer-a-subgraph/ -/ur/mips-faqs/ /ur/network/benefits/ /ur/network/contracts/ /ur/network/curating/ @@ -1788,13 +1661,8 @@ /vi/cookbook/timeseries/ /vi/cookbook/transfer-to-the-graph/ /vi/deploying/deploy-using-subgraph-studio/ -/vi/deploying/deploying-a-subgraph-to-hosted/ -/vi/deploying/deploying-a-subgraph-to-studio/ -/vi/deploying/hosted-service/ /vi/deploying/multiple-networks/ /vi/deploying/subgraph-studio-faqs/ -/vi/deploying/subgraph-studio/ -/vi/developing/creating-a-subgraph/ /vi/developing/creating-a-subgraph/advanced/ /vi/developing/creating-a-subgraph/assemblyscript-mappings/ /vi/developing/creating-a-subgraph/install-the-cli/ @@ -1811,9 +1679,7 @@ /vi/glossary/ /vi/graphcast/ /vi/managing/delete-a-subgraph/ -/vi/managing/deprecate-a-subgraph/ /vi/managing/transfer-a-subgraph/ -/vi/mips-faqs/ /vi/network/benefits/ /vi/network/contracts/ /vi/network/curating/ @@ -1868,13 +1734,14 @@ /yo/cookbook/timeseries/ /yo/cookbook/transfer-to-the-graph/ /yo/deploying/deploy-using-subgraph-studio/ -/yo/deploying/deploying-a-subgraph-to-hosted/ -/yo/deploying/deploying-a-subgraph-to-studio/ -/yo/deploying/hosted-service/ /yo/deploying/multiple-networks/ /yo/deploying/subgraph-studio-faqs/ -/yo/deploying/subgraph-studio/ -/yo/developing/creating-a-subgraph/ +/yo/developing/creating-a-subgraph/advanced/ +/yo/developing/creating-a-subgraph/assemblyscript-mappings/ +/yo/developing/creating-a-subgraph/install-the-cli/ +/yo/developing/creating-a-subgraph/ql-schema/ +/yo/developing/creating-a-subgraph/starting-your-subgraph/ 
+/yo/developing/creating-a-subgraph/subgraph-manifest/ /yo/developing/developer-faqs/ /yo/developing/graph-ts/api/ /yo/developing/graph-ts/common-issues/ @@ -1885,9 +1752,7 @@ /yo/glossary/ /yo/graphcast/ /yo/managing/delete-a-subgraph/ -/yo/managing/deprecate-a-subgraph/ /yo/managing/transfer-a-subgraph/ -/yo/mips-faqs/ /yo/network/benefits/ /yo/network/contracts/ /yo/network/curating/ @@ -1944,13 +1809,8 @@ /zh/cookbook/timeseries/ /zh/cookbook/transfer-to-the-graph/ /zh/deploying/deploy-using-subgraph-studio/ -/zh/deploying/deploying-a-subgraph-to-hosted/ -/zh/deploying/deploying-a-subgraph-to-studio/ -/zh/deploying/hosted-service/ /zh/deploying/multiple-networks/ /zh/deploying/subgraph-studio-faqs/ -/zh/deploying/subgraph-studio/ -/zh/developing/creating-a-subgraph/ /zh/developing/creating-a-subgraph/advanced/ /zh/developing/creating-a-subgraph/assemblyscript-mappings/ /zh/developing/creating-a-subgraph/install-the-cli/ @@ -1967,9 +1827,7 @@ /zh/glossary/ /zh/graphcast/ /zh/managing/delete-a-subgraph/ -/zh/managing/deprecate-a-subgraph/ /zh/managing/transfer-a-subgraph/ -/zh/mips-faqs/ /zh/network/benefits/ /zh/network/contracts/ /zh/network/curating/ diff --git a/website/scripts/fix-pages-structure.ts b/website/scripts/fix-pages-structure.ts new file mode 100644 index 000000000000..ce6881b4964b --- /dev/null +++ b/website/scripts/fix-pages-structure.ts @@ -0,0 +1,159 @@ +/** + * This script maintains consistency across different language versions of documentation pages. + * It performs these operations: + * 1. If a page exists in English (en) but is missing in other languages, + * it copies the English version to those languages + * 2. If a page exists in other languages but not in English, + * it removes those pages (as English is the source of truth) + * 3. 
Ensures each directory has a _meta.js file: + * - For English: lists all pages in that directory + * - For other languages: imports and extends the English version + */ + +import fs from 'fs/promises' +import path from 'path' + +const PAGES_DIR = path.join(process.cwd(), 'pages') +const SOURCE_LANG = 'en' +const META_FILENAME = '_meta.js' +const FORCE_META = process.argv.includes('--force-meta') + +async function getFiles(dir: string): Promise { + const files: string[] = [] + + async function scan(directory: string) { + const items = await fs.readdir(directory, { withFileTypes: true }) + for (const item of items) { + const fullPath = path.join(directory, item.name) + if (item.isDirectory()) { + await scan(fullPath) + } else { + files.push(fullPath) + } + } + } + + await scan(dir) + return files +} + +type SourceMetaOptions = { + files: string[] + baseDir: string +} + +type TranslationMetaOptions = { + depth: number + pathAfterLang: string +} + +function createMetaContent(type: 'source' | 'translation', options: SourceMetaOptions | TranslationMetaOptions) { + if (type === 'source') { + const { files, baseDir } = options as SourceMetaOptions + const pages = files + .map((f) => path.relative(baseDir, f)) + .map((f) => path.basename(f, path.extname(f))) + .filter((f) => f !== path.basename(META_FILENAME, '.js')) + .filter((f) => !f.startsWith('[[...')) + return `export default {\n${pages.map((page) => ` '${page}': '',`).join('\n')}\n}\n` + } + + const createTranslationMeta = (importPath: string) => + `import meta from '${importPath}'\n\nexport default {\n ...meta,\n}\n` + + const { depth, pathAfterLang } = options as TranslationMetaOptions + const importPath = path.posix.join('../'.repeat(depth), 'en', pathAfterLang, META_FILENAME) + return createTranslationMeta(importPath) +} + +async function main() { + const langs = (await fs.readdir(PAGES_DIR)).filter((dir) => /^[a-z]{2}$/.test(dir)) + const sourceDir = path.join(PAGES_DIR, SOURCE_LANG) + const sourceFiles = await 
getFiles(sourceDir) + + for (const lang of langs) { + if (lang === SOURCE_LANG) continue + const langDir = path.join(PAGES_DIR, lang) + await fs.mkdir(langDir, { recursive: true }) + + // Get all directories from source files + const directories = new Set(sourceFiles.map((f) => path.dirname(path.relative(sourceDir, f)))) + + // Process each directory + for (const dir of directories) { + const targetDir = path.join(langDir, dir) + await fs.mkdir(targetDir, { recursive: true }) + + // Create meta files + const sourceMetaPath = path.join(sourceDir, dir, META_FILENAME) + const targetMetaPath = path.join(targetDir, META_FILENAME) + + // For source (English) meta + if (FORCE_META || !(await fileExists(sourceMetaPath))) { + const filesInDir = sourceFiles.filter((f) => path.dirname(path.relative(sourceDir, f)) === dir) + await fs.writeFile( + sourceMetaPath, + createMetaContent('source', { + files: filesInDir, + baseDir: path.join(sourceDir, dir), + }), + ) + } + + // For translation meta + if (FORCE_META || !(await fileExists(targetMetaPath))) { + const relativeDir = path.relative(PAGES_DIR, targetDir) + const depth = relativeDir.split(path.sep).length + const pathAfterLang = relativeDir.split(path.sep).slice(1).join('/') + await fs.writeFile( + targetMetaPath, + createMetaContent('translation', { + depth, + pathAfterLang, + }), + ) + } + } + + // Sync files + const langFiles = await getFiles(langDir) + const relativeSourceFiles = sourceFiles.map((f) => path.relative(sourceDir, f)) + const relativeTargetFiles = langFiles.map((f) => path.relative(langDir, f)) + + // Copy missing files + for (const file of relativeSourceFiles) { + if ( + !relativeTargetFiles.includes(file) && + path.basename(file) !== META_FILENAME && + !path.basename(file).startsWith('[[...') + ) { + const sourcePath = path.join(sourceDir, file) + const targetPath = path.join(langDir, file) + await fs.mkdir(path.dirname(targetPath), { recursive: true }) + console.log(`Copying ${file} to ${lang}`) + await 
fs.copyFile(sourcePath, targetPath) + } + } + + // Remove extra files + for (const file of relativeTargetFiles) { + if (!relativeSourceFiles.includes(file) && path.basename(file) !== META_FILENAME) { + const filePath = path.join(langDir, file) + console.log(`Removing ${file} from ${lang}`) + await fs.unlink(filePath) + } + } + } +} + +// Helper function to check if file exists +async function fileExists(filepath: string) { + try { + await fs.access(filepath) + return true + } catch { + return false + } +} + +main().catch(console.error) From 2941c516567b9bfc77aee166f12b29faff5d4eb2 Mon Sep 17 00:00:00 2001 From: benface Date: Fri, 13 Dec 2024 22:58:16 -0500 Subject: [PATCH 2/2] Improve script --- website/scripts/fix-pages-structure.ts | 233 +++++++++++++------------ 1 file changed, 119 insertions(+), 114 deletions(-) diff --git a/website/scripts/fix-pages-structure.ts b/website/scripts/fix-pages-structure.ts index ce6881b4964b..e134b6bb958a 100644 --- a/website/scripts/fix-pages-structure.ts +++ b/website/scripts/fix-pages-structure.ts @@ -1,158 +1,163 @@ /** - * This script maintains consistency across different language versions of documentation pages. - * It performs these operations: - * 1. If a page exists in English (en) but is missing in other languages, - * it copies the English version to those languages - * 2. If a page exists in other languages but not in English, - * it removes those pages (as English is the source of truth) - * 3. Ensures each directory has a _meta.js file: - * - For English: lists all pages in that directory - * - For other languages: imports and extends the English version + * This script ensures that the pages directory structure is consistent across all locales. + * It performs these operations in order: + * + * 1. Identifies directories in the English locale (`pages/en`) that have content files + * (e.g. .mdx, .md, or .json, but really any file that is not hidden or a meta file) + * + * 2. 
Ensures all those directories have a meta file (_meta.js) + * + * 3. Synchronizes directories in the other locales with English: + * - Creates matching directory structure + * - Creates missing _meta.js files and imports from English + * - Copies content files, excluding catch-all routes ([[...slug]].mdx) + * - Removes files that don't exist in English + * + * 4. Cleans up directories: + * - Deletes directories that have no content files in English + * - Keeps directories in other locales if they have content files in English + * (e.g. `graph-client`, which only has a catch-all route in English); this prevents + * referencing a non-existent directory in the meta file */ import fs from 'fs/promises' import path from 'path' -const PAGES_DIR = path.join(process.cwd(), 'pages') -const SOURCE_LANG = 'en' -const META_FILENAME = '_meta.js' const FORCE_META = process.argv.includes('--force-meta') -async function getFiles(dir: string): Promise { +const PAGES_DIRECTORY = path.join(process.cwd(), 'pages') +const SOURCE_LOCALE = 'en' +const META_FILENAME = '_meta.js' +const CATCH_ALL_PREFIX = '[[...' +const HIDDEN_FILE_PREFIX = '.' 
+ +async function fileExists(filepath: string) { + try { + await fs.access(filepath) + return true + } catch { + return false + } +} + +function isContentFile(filename: string, excludeCatchAll = false) { + return ( + filename !== META_FILENAME && + !filename.startsWith(HIDDEN_FILE_PREFIX) && + (!excludeCatchAll || !filename.startsWith(CATCH_ALL_PREFIX)) + ) +} + +type PagesStructure = { + files: string[] // relative paths to all files + contentDirectories: Set // directories with content files + directories: string[] // all directories for cleanup +} + +async function getPagesStructure(locale: string): Promise { + const localeDirectory = path.join(PAGES_DIRECTORY, locale) const files: string[] = [] + const directories: string[] = [] async function scan(directory: string) { const items = await fs.readdir(directory, { withFileTypes: true }) for (const item of items) { const fullPath = path.join(directory, item.name) if (item.isDirectory()) { + directories.push(path.relative(localeDirectory, fullPath)) await scan(fullPath) } else { - files.push(fullPath) + files.push(path.relative(localeDirectory, fullPath)) } } } - await scan(dir) - return files -} - -type SourceMetaOptions = { - files: string[] - baseDir: string -} + await scan(localeDirectory) -type TranslationMetaOptions = { - depth: number - pathAfterLang: string -} - -function createMetaContent(type: 'source' | 'translation', options: SourceMetaOptions | TranslationMetaOptions) { - if (type === 'source') { - const { files, baseDir } = options as SourceMetaOptions - const pages = files - .map((f) => path.relative(baseDir, f)) - .map((f) => path.basename(f, path.extname(f))) - .filter((f) => f !== path.basename(META_FILENAME, '.js')) - .filter((f) => !f.startsWith('[[...')) - return `export default {\n${pages.map((page) => ` '${page}': '',`).join('\n')}\n}\n` + return { + files, + directories: directories.sort().reverse(), + contentDirectories: new Set( + files.filter((file) => 
isContentFile(path.basename(file))).map((file) => path.dirname(file)), + ), } - - const createTranslationMeta = (importPath: string) => - `import meta from '${importPath}'\n\nexport default {\n ...meta,\n}\n` - - const { depth, pathAfterLang } = options as TranslationMetaOptions - const importPath = path.posix.join('../'.repeat(depth), 'en', pathAfterLang, META_FILENAME) - return createTranslationMeta(importPath) } async function main() { - const langs = (await fs.readdir(PAGES_DIR)).filter((dir) => /^[a-z]{2}$/.test(dir)) - const sourceDir = path.join(PAGES_DIR, SOURCE_LANG) - const sourceFiles = await getFiles(sourceDir) - - for (const lang of langs) { - if (lang === SOURCE_LANG) continue - const langDir = path.join(PAGES_DIR, lang) - await fs.mkdir(langDir, { recursive: true }) - - // Get all directories from source files - const directories = new Set(sourceFiles.map((f) => path.dirname(path.relative(sourceDir, f)))) - - // Process each directory - for (const dir of directories) { - const targetDir = path.join(langDir, dir) - await fs.mkdir(targetDir, { recursive: true }) - - // Create meta files - const sourceMetaPath = path.join(sourceDir, dir, META_FILENAME) - const targetMetaPath = path.join(targetDir, META_FILENAME) - - // For source (English) meta - if (FORCE_META || !(await fileExists(sourceMetaPath))) { - const filesInDir = sourceFiles.filter((f) => path.dirname(path.relative(sourceDir, f)) === dir) - await fs.writeFile( - sourceMetaPath, - createMetaContent('source', { - files: filesInDir, - baseDir: path.join(sourceDir, dir), - }), + const sourceDirectory = path.join(PAGES_DIRECTORY, SOURCE_LOCALE) + const sourceStructure = await getPagesStructure(SOURCE_LOCALE) + const translatedLocales = (await fs.readdir(PAGES_DIRECTORY)) + .filter((directory) => /^[a-z]{2}$/.test(directory)) + .filter((directory) => directory !== SOURCE_LOCALE) + + // Create/update meta files in source locale directories + for (const directory of sourceStructure.contentDirectories) 
{ + const sourceMetaPath = path.join(sourceDirectory, directory, META_FILENAME) + if (FORCE_META || !(await fileExists(sourceMetaPath))) { + const filesInDirectory = sourceStructure.files + .filter( + (file) => + path.dirname(file) === directory && + isContentFile(path.basename(file), true) && + (file.endsWith('.md') || file.endsWith('.mdx')), ) - } + .map((file) => path.basename(file, path.extname(file))) + await fs.writeFile( + sourceMetaPath, + `export default {\n${filesInDirectory.map((page) => ` '${page}': '',`).join('\n')}\n}\n`, + ) + } + } - // For translation meta - if (FORCE_META || !(await fileExists(targetMetaPath))) { - const relativeDir = path.relative(PAGES_DIR, targetDir) - const depth = relativeDir.split(path.sep).length - const pathAfterLang = relativeDir.split(path.sep).slice(1).join('/') - await fs.writeFile( - targetMetaPath, - createMetaContent('translation', { - depth, - pathAfterLang, - }), - ) + // Synchronize other locales + for (const locale of translatedLocales) { + const localeDirectory = path.join(PAGES_DIRECTORY, locale) + const localeStructure = await getPagesStructure(locale) + + // Create directories and meta files + for (const directory of sourceStructure.contentDirectories) { + const translatedDirectory = path.join(localeDirectory, directory) + await fs.mkdir(translatedDirectory, { recursive: true }) + const translatedMetaPath = path.join(translatedDirectory, META_FILENAME) + if (FORCE_META || !(await fileExists(translatedMetaPath))) { + const depth = path.relative(PAGES_DIRECTORY, translatedDirectory).split(path.sep).length + const importPath = path.posix.join('../'.repeat(depth), SOURCE_LOCALE, directory, META_FILENAME) + await fs.writeFile(translatedMetaPath, `import meta from '${importPath}'\n\nexport default {\n ...meta,\n}\n`) } } - // Sync files - const langFiles = await getFiles(langDir) - const relativeSourceFiles = sourceFiles.map((f) => path.relative(sourceDir, f)) - const relativeTargetFiles = langFiles.map((f) => 
path.relative(langDir, f)) - // Copy missing files - for (const file of relativeSourceFiles) { - if ( - !relativeTargetFiles.includes(file) && - path.basename(file) !== META_FILENAME && - !path.basename(file).startsWith('[[...') - ) { - const sourcePath = path.join(sourceDir, file) - const targetPath = path.join(langDir, file) - await fs.mkdir(path.dirname(targetPath), { recursive: true }) - console.log(`Copying ${file} to ${lang}`) - await fs.copyFile(sourcePath, targetPath) + for (const file of sourceStructure.files) { + const filename = path.basename(file) + if (!localeStructure.files.includes(file) && isContentFile(filename, true)) { + const sourcePath = path.join(sourceDirectory, file) + const translatedPath = path.join(localeDirectory, file) + await fs.mkdir(path.dirname(translatedPath), { recursive: true }) + console.log(`Copying ${path.join(SOURCE_LOCALE, file)} to ${path.join(locale, file)}`) + await fs.copyFile(sourcePath, translatedPath) } } // Remove extra files - for (const file of relativeTargetFiles) { - if (!relativeSourceFiles.includes(file) && path.basename(file) !== META_FILENAME) { - const filePath = path.join(langDir, file) - console.log(`Removing ${file} from ${lang}`) + for (const file of localeStructure.files) { + const filename = path.basename(file) + if (!sourceStructure.files.includes(file) && isContentFile(filename)) { + const filePath = path.join(localeDirectory, file) + console.log(`Removing ${path.join(locale, file)}`) await fs.unlink(filePath) } } } -} -// Helper function to check if file exists -async function fileExists(filepath: string) { - try { - await fs.access(filepath) - return true - } catch { - return false + // Delete directories that have no content files in the source locale + for (const locale of [SOURCE_LOCALE, ...translatedLocales]) { + const { directories } = await getPagesStructure(locale) + for (const directory of directories) { + if (!sourceStructure.contentDirectories.has(directory)) { + console.log(`Removing 
directory ${path.join(locale, directory)}`) + await fs.rm(path.join(PAGES_DIRECTORY, locale, directory), { recursive: true, force: true }) + } + } } }