diff --git a/website/route-lockfile.txt b/website/route-lockfile.txt index 8ad8d25f9ae9..10cb9c845c6d 100644 --- a/website/route-lockfile.txt +++ b/website/route-lockfile.txt @@ -17,32 +17,33 @@ /ar/indexing/tooling/graphcast/ /ar/resources/benefits/ /ar/resources/glossary/ -/ar/resources/release-notes/assemblyscript-migration-guide/ -/ar/resources/release-notes/graphql-validations-migration-guide/ +/ar/resources/migration-guides/assemblyscript-migration-guide/ +/ar/resources/migration-guides/graphql-validations-migration-guide/ /ar/resources/roles/curating/ /ar/resources/roles/delegating/delegating/ /ar/resources/roles/delegating/undelegating/ +/ar/resources/subgraph-studio-faq/ /ar/resources/tokenomics/ /ar/sps/introduction/ /ar/sps/sps-faq/ /ar/sps/triggers/ /ar/sps/tutorial/ +/ar/subgraphs/best-practices/avoid-eth-calls/ +/ar/subgraphs/best-practices/derivedfrom/ +/ar/subgraphs/best-practices/grafting-hotfix/ +/ar/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ar/subgraphs/best-practices/pruning/ +/ar/subgraphs/best-practices/timeseries/ /ar/subgraphs/billing/ /ar/subgraphs/cookbook/arweave/ -/ar/subgraphs/cookbook/avoid-eth-calls/ /ar/subgraphs/cookbook/cosmos/ -/ar/subgraphs/cookbook/derivedfrom/ /ar/subgraphs/cookbook/enums/ -/ar/subgraphs/cookbook/grafting-hotfix/ /ar/subgraphs/cookbook/grafting/ -/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ar/subgraphs/cookbook/near/ /ar/subgraphs/cookbook/polymarket/ -/ar/subgraphs/cookbook/pruning/ /ar/subgraphs/cookbook/secure-api-keys-nextjs/ /ar/subgraphs/cookbook/subgraph-debug-forking/ /ar/subgraphs/cookbook/subgraph-uncrashable/ -/ar/subgraphs/cookbook/timeseries/ /ar/subgraphs/cookbook/transfer-to-the-graph/ /ar/subgraphs/developing/creating/advanced/ /ar/subgraphs/developing/creating/assemblyscript-mappings/ @@ -56,7 +57,6 @@ /ar/subgraphs/developing/creating/subgraph-manifest/ /ar/subgraphs/developing/creating/unit-testing-framework/ /ar/subgraphs/developing/deploying/multiple-networks/ 
-/ar/subgraphs/developing/deploying/subgraph-studio-faq/ /ar/subgraphs/developing/deploying/using-subgraph-studio/ /ar/subgraphs/developing/developer-faq/ /ar/subgraphs/developing/introduction/ @@ -103,32 +103,33 @@ /cs/indexing/tooling/graphcast/ /cs/resources/benefits/ /cs/resources/glossary/ -/cs/resources/release-notes/assemblyscript-migration-guide/ -/cs/resources/release-notes/graphql-validations-migration-guide/ +/cs/resources/migration-guides/assemblyscript-migration-guide/ +/cs/resources/migration-guides/graphql-validations-migration-guide/ /cs/resources/roles/curating/ /cs/resources/roles/delegating/delegating/ /cs/resources/roles/delegating/undelegating/ +/cs/resources/subgraph-studio-faq/ /cs/resources/tokenomics/ /cs/sps/introduction/ /cs/sps/sps-faq/ /cs/sps/triggers/ /cs/sps/tutorial/ +/cs/subgraphs/best-practices/avoid-eth-calls/ +/cs/subgraphs/best-practices/derivedfrom/ +/cs/subgraphs/best-practices/grafting-hotfix/ +/cs/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/cs/subgraphs/best-practices/pruning/ +/cs/subgraphs/best-practices/timeseries/ /cs/subgraphs/billing/ /cs/subgraphs/cookbook/arweave/ -/cs/subgraphs/cookbook/avoid-eth-calls/ /cs/subgraphs/cookbook/cosmos/ -/cs/subgraphs/cookbook/derivedfrom/ /cs/subgraphs/cookbook/enums/ -/cs/subgraphs/cookbook/grafting-hotfix/ /cs/subgraphs/cookbook/grafting/ -/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /cs/subgraphs/cookbook/near/ /cs/subgraphs/cookbook/polymarket/ -/cs/subgraphs/cookbook/pruning/ /cs/subgraphs/cookbook/secure-api-keys-nextjs/ /cs/subgraphs/cookbook/subgraph-debug-forking/ /cs/subgraphs/cookbook/subgraph-uncrashable/ -/cs/subgraphs/cookbook/timeseries/ /cs/subgraphs/cookbook/transfer-to-the-graph/ /cs/subgraphs/developing/creating/advanced/ /cs/subgraphs/developing/creating/assemblyscript-mappings/ @@ -142,7 +143,6 @@ /cs/subgraphs/developing/creating/subgraph-manifest/ /cs/subgraphs/developing/creating/unit-testing-framework/ 
/cs/subgraphs/developing/deploying/multiple-networks/ -/cs/subgraphs/developing/deploying/subgraph-studio-faq/ /cs/subgraphs/developing/deploying/using-subgraph-studio/ /cs/subgraphs/developing/developer-faq/ /cs/subgraphs/developing/introduction/ @@ -187,32 +187,33 @@ /de/indexing/tooling/graphcast/ /de/resources/benefits/ /de/resources/glossary/ -/de/resources/release-notes/assemblyscript-migration-guide/ -/de/resources/release-notes/graphql-validations-migration-guide/ +/de/resources/migration-guides/assemblyscript-migration-guide/ +/de/resources/migration-guides/graphql-validations-migration-guide/ /de/resources/roles/curating/ /de/resources/roles/delegating/delegating/ /de/resources/roles/delegating/undelegating/ +/de/resources/subgraph-studio-faq/ /de/resources/tokenomics/ /de/sps/introduction/ /de/sps/sps-faq/ /de/sps/triggers/ /de/sps/tutorial/ +/de/subgraphs/best-practices/avoid-eth-calls/ +/de/subgraphs/best-practices/derivedfrom/ +/de/subgraphs/best-practices/grafting-hotfix/ +/de/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/de/subgraphs/best-practices/pruning/ +/de/subgraphs/best-practices/timeseries/ /de/subgraphs/billing/ /de/subgraphs/cookbook/arweave/ -/de/subgraphs/cookbook/avoid-eth-calls/ /de/subgraphs/cookbook/cosmos/ -/de/subgraphs/cookbook/derivedfrom/ /de/subgraphs/cookbook/enums/ -/de/subgraphs/cookbook/grafting-hotfix/ /de/subgraphs/cookbook/grafting/ -/de/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /de/subgraphs/cookbook/near/ /de/subgraphs/cookbook/polymarket/ -/de/subgraphs/cookbook/pruning/ /de/subgraphs/cookbook/secure-api-keys-nextjs/ /de/subgraphs/cookbook/subgraph-debug-forking/ /de/subgraphs/cookbook/subgraph-uncrashable/ -/de/subgraphs/cookbook/timeseries/ /de/subgraphs/cookbook/transfer-to-the-graph/ /de/subgraphs/developing/creating/advanced/ /de/subgraphs/developing/creating/assemblyscript-mappings/ @@ -226,7 +227,6 @@ /de/subgraphs/developing/creating/subgraph-manifest/ 
/de/subgraphs/developing/creating/unit-testing-framework/ /de/subgraphs/developing/deploying/multiple-networks/ -/de/subgraphs/developing/deploying/subgraph-studio-faq/ /de/subgraphs/developing/deploying/using-subgraph-studio/ /de/subgraphs/developing/developer-faq/ /de/subgraphs/developing/introduction/ @@ -273,32 +273,33 @@ /en/indexing/tooling/graphcast/ /en/resources/benefits/ /en/resources/glossary/ -/en/resources/release-notes/assemblyscript-migration-guide/ -/en/resources/release-notes/graphql-validations-migration-guide/ +/en/resources/migration-guides/assemblyscript-migration-guide/ +/en/resources/migration-guides/graphql-validations-migration-guide/ /en/resources/roles/curating/ /en/resources/roles/delegating/delegating/ /en/resources/roles/delegating/undelegating/ +/en/resources/subgraph-studio-faq/ /en/resources/tokenomics/ /en/sps/introduction/ /en/sps/sps-faq/ /en/sps/triggers/ /en/sps/tutorial/ +/en/subgraphs/best-practices/avoid-eth-calls/ +/en/subgraphs/best-practices/derivedfrom/ +/en/subgraphs/best-practices/grafting-hotfix/ +/en/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/en/subgraphs/best-practices/pruning/ +/en/subgraphs/best-practices/timeseries/ /en/subgraphs/billing/ /en/subgraphs/cookbook/arweave/ -/en/subgraphs/cookbook/avoid-eth-calls/ /en/subgraphs/cookbook/cosmos/ -/en/subgraphs/cookbook/derivedfrom/ /en/subgraphs/cookbook/enums/ -/en/subgraphs/cookbook/grafting-hotfix/ /en/subgraphs/cookbook/grafting/ -/en/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /en/subgraphs/cookbook/near/ /en/subgraphs/cookbook/polymarket/ -/en/subgraphs/cookbook/pruning/ /en/subgraphs/cookbook/secure-api-keys-nextjs/ /en/subgraphs/cookbook/subgraph-debug-forking/ /en/subgraphs/cookbook/subgraph-uncrashable/ -/en/subgraphs/cookbook/timeseries/ /en/subgraphs/cookbook/transfer-to-the-graph/ /en/subgraphs/developing/creating/advanced/ /en/subgraphs/developing/creating/assemblyscript-mappings/ @@ -312,7 +313,6 @@ 
/en/subgraphs/developing/creating/subgraph-manifest/ /en/subgraphs/developing/creating/unit-testing-framework/ /en/subgraphs/developing/deploying/multiple-networks/ -/en/subgraphs/developing/deploying/subgraph-studio-faq/ /en/subgraphs/developing/deploying/using-subgraph-studio/ /en/subgraphs/developing/developer-faq/ /en/subgraphs/developing/introduction/ @@ -359,32 +359,33 @@ /es/indexing/tooling/graphcast/ /es/resources/benefits/ /es/resources/glossary/ -/es/resources/release-notes/assemblyscript-migration-guide/ -/es/resources/release-notes/graphql-validations-migration-guide/ +/es/resources/migration-guides/assemblyscript-migration-guide/ +/es/resources/migration-guides/graphql-validations-migration-guide/ /es/resources/roles/curating/ /es/resources/roles/delegating/delegating/ /es/resources/roles/delegating/undelegating/ +/es/resources/subgraph-studio-faq/ /es/resources/tokenomics/ /es/sps/introduction/ /es/sps/sps-faq/ /es/sps/triggers/ /es/sps/tutorial/ +/es/subgraphs/best-practices/avoid-eth-calls/ +/es/subgraphs/best-practices/derivedfrom/ +/es/subgraphs/best-practices/grafting-hotfix/ +/es/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/es/subgraphs/best-practices/pruning/ +/es/subgraphs/best-practices/timeseries/ /es/subgraphs/billing/ /es/subgraphs/cookbook/arweave/ -/es/subgraphs/cookbook/avoid-eth-calls/ /es/subgraphs/cookbook/cosmos/ -/es/subgraphs/cookbook/derivedfrom/ /es/subgraphs/cookbook/enums/ -/es/subgraphs/cookbook/grafting-hotfix/ /es/subgraphs/cookbook/grafting/ -/es/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /es/subgraphs/cookbook/near/ /es/subgraphs/cookbook/polymarket/ -/es/subgraphs/cookbook/pruning/ /es/subgraphs/cookbook/secure-api-keys-nextjs/ /es/subgraphs/cookbook/subgraph-debug-forking/ /es/subgraphs/cookbook/subgraph-uncrashable/ -/es/subgraphs/cookbook/timeseries/ /es/subgraphs/cookbook/transfer-to-the-graph/ /es/subgraphs/developing/creating/advanced/ 
/es/subgraphs/developing/creating/assemblyscript-mappings/ @@ -398,7 +399,6 @@ /es/subgraphs/developing/creating/subgraph-manifest/ /es/subgraphs/developing/creating/unit-testing-framework/ /es/subgraphs/developing/deploying/multiple-networks/ -/es/subgraphs/developing/deploying/subgraph-studio-faq/ /es/subgraphs/developing/deploying/using-subgraph-studio/ /es/subgraphs/developing/developer-faq/ /es/subgraphs/developing/introduction/ @@ -445,32 +445,33 @@ /fr/indexing/tooling/graphcast/ /fr/resources/benefits/ /fr/resources/glossary/ -/fr/resources/release-notes/assemblyscript-migration-guide/ -/fr/resources/release-notes/graphql-validations-migration-guide/ +/fr/resources/migration-guides/assemblyscript-migration-guide/ +/fr/resources/migration-guides/graphql-validations-migration-guide/ /fr/resources/roles/curating/ /fr/resources/roles/delegating/delegating/ /fr/resources/roles/delegating/undelegating/ +/fr/resources/subgraph-studio-faq/ /fr/resources/tokenomics/ /fr/sps/introduction/ /fr/sps/sps-faq/ /fr/sps/triggers/ /fr/sps/tutorial/ +/fr/subgraphs/best-practices/avoid-eth-calls/ +/fr/subgraphs/best-practices/derivedfrom/ +/fr/subgraphs/best-practices/grafting-hotfix/ +/fr/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/fr/subgraphs/best-practices/pruning/ +/fr/subgraphs/best-practices/timeseries/ /fr/subgraphs/billing/ /fr/subgraphs/cookbook/arweave/ -/fr/subgraphs/cookbook/avoid-eth-calls/ /fr/subgraphs/cookbook/cosmos/ -/fr/subgraphs/cookbook/derivedfrom/ /fr/subgraphs/cookbook/enums/ -/fr/subgraphs/cookbook/grafting-hotfix/ /fr/subgraphs/cookbook/grafting/ -/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /fr/subgraphs/cookbook/near/ /fr/subgraphs/cookbook/polymarket/ -/fr/subgraphs/cookbook/pruning/ /fr/subgraphs/cookbook/secure-api-keys-nextjs/ /fr/subgraphs/cookbook/subgraph-debug-forking/ /fr/subgraphs/cookbook/subgraph-uncrashable/ -/fr/subgraphs/cookbook/timeseries/ /fr/subgraphs/cookbook/transfer-to-the-graph/ 
/fr/subgraphs/developing/creating/advanced/ /fr/subgraphs/developing/creating/assemblyscript-mappings/ @@ -484,7 +485,6 @@ /fr/subgraphs/developing/creating/subgraph-manifest/ /fr/subgraphs/developing/creating/unit-testing-framework/ /fr/subgraphs/developing/deploying/multiple-networks/ -/fr/subgraphs/developing/deploying/subgraph-studio-faq/ /fr/subgraphs/developing/deploying/using-subgraph-studio/ /fr/subgraphs/developing/developer-faq/ /fr/subgraphs/developing/introduction/ @@ -531,32 +531,33 @@ /hi/indexing/tooling/graphcast/ /hi/resources/benefits/ /hi/resources/glossary/ -/hi/resources/release-notes/assemblyscript-migration-guide/ -/hi/resources/release-notes/graphql-validations-migration-guide/ +/hi/resources/migration-guides/assemblyscript-migration-guide/ +/hi/resources/migration-guides/graphql-validations-migration-guide/ /hi/resources/roles/curating/ /hi/resources/roles/delegating/delegating/ /hi/resources/roles/delegating/undelegating/ +/hi/resources/subgraph-studio-faq/ /hi/resources/tokenomics/ /hi/sps/introduction/ /hi/sps/sps-faq/ /hi/sps/triggers/ /hi/sps/tutorial/ +/hi/subgraphs/best-practices/avoid-eth-calls/ +/hi/subgraphs/best-practices/derivedfrom/ +/hi/subgraphs/best-practices/grafting-hotfix/ +/hi/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/hi/subgraphs/best-practices/pruning/ +/hi/subgraphs/best-practices/timeseries/ /hi/subgraphs/billing/ /hi/subgraphs/cookbook/arweave/ -/hi/subgraphs/cookbook/avoid-eth-calls/ /hi/subgraphs/cookbook/cosmos/ -/hi/subgraphs/cookbook/derivedfrom/ /hi/subgraphs/cookbook/enums/ -/hi/subgraphs/cookbook/grafting-hotfix/ /hi/subgraphs/cookbook/grafting/ -/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /hi/subgraphs/cookbook/near/ /hi/subgraphs/cookbook/polymarket/ -/hi/subgraphs/cookbook/pruning/ /hi/subgraphs/cookbook/secure-api-keys-nextjs/ /hi/subgraphs/cookbook/subgraph-debug-forking/ /hi/subgraphs/cookbook/subgraph-uncrashable/ -/hi/subgraphs/cookbook/timeseries/ 
/hi/subgraphs/cookbook/transfer-to-the-graph/ /hi/subgraphs/developing/creating/advanced/ /hi/subgraphs/developing/creating/assemblyscript-mappings/ @@ -570,7 +571,6 @@ /hi/subgraphs/developing/creating/subgraph-manifest/ /hi/subgraphs/developing/creating/unit-testing-framework/ /hi/subgraphs/developing/deploying/multiple-networks/ -/hi/subgraphs/developing/deploying/subgraph-studio-faq/ /hi/subgraphs/developing/deploying/using-subgraph-studio/ /hi/subgraphs/developing/developer-faq/ /hi/subgraphs/developing/introduction/ @@ -617,32 +617,33 @@ /it/indexing/tooling/graphcast/ /it/resources/benefits/ /it/resources/glossary/ -/it/resources/release-notes/assemblyscript-migration-guide/ -/it/resources/release-notes/graphql-validations-migration-guide/ +/it/resources/migration-guides/assemblyscript-migration-guide/ +/it/resources/migration-guides/graphql-validations-migration-guide/ /it/resources/roles/curating/ /it/resources/roles/delegating/delegating/ /it/resources/roles/delegating/undelegating/ +/it/resources/subgraph-studio-faq/ /it/resources/tokenomics/ /it/sps/introduction/ /it/sps/sps-faq/ /it/sps/triggers/ /it/sps/tutorial/ +/it/subgraphs/best-practices/avoid-eth-calls/ +/it/subgraphs/best-practices/derivedfrom/ +/it/subgraphs/best-practices/grafting-hotfix/ +/it/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/it/subgraphs/best-practices/pruning/ +/it/subgraphs/best-practices/timeseries/ /it/subgraphs/billing/ /it/subgraphs/cookbook/arweave/ -/it/subgraphs/cookbook/avoid-eth-calls/ /it/subgraphs/cookbook/cosmos/ -/it/subgraphs/cookbook/derivedfrom/ /it/subgraphs/cookbook/enums/ -/it/subgraphs/cookbook/grafting-hotfix/ /it/subgraphs/cookbook/grafting/ -/it/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /it/subgraphs/cookbook/near/ /it/subgraphs/cookbook/polymarket/ -/it/subgraphs/cookbook/pruning/ /it/subgraphs/cookbook/secure-api-keys-nextjs/ /it/subgraphs/cookbook/subgraph-debug-forking/ /it/subgraphs/cookbook/subgraph-uncrashable/ 
-/it/subgraphs/cookbook/timeseries/ /it/subgraphs/cookbook/transfer-to-the-graph/ /it/subgraphs/developing/creating/advanced/ /it/subgraphs/developing/creating/assemblyscript-mappings/ @@ -656,7 +657,6 @@ /it/subgraphs/developing/creating/subgraph-manifest/ /it/subgraphs/developing/creating/unit-testing-framework/ /it/subgraphs/developing/deploying/multiple-networks/ -/it/subgraphs/developing/deploying/subgraph-studio-faq/ /it/subgraphs/developing/deploying/using-subgraph-studio/ /it/subgraphs/developing/developer-faq/ /it/subgraphs/developing/introduction/ @@ -703,32 +703,33 @@ /ja/indexing/tooling/graphcast/ /ja/resources/benefits/ /ja/resources/glossary/ -/ja/resources/release-notes/assemblyscript-migration-guide/ -/ja/resources/release-notes/graphql-validations-migration-guide/ +/ja/resources/migration-guides/assemblyscript-migration-guide/ +/ja/resources/migration-guides/graphql-validations-migration-guide/ /ja/resources/roles/curating/ /ja/resources/roles/delegating/delegating/ /ja/resources/roles/delegating/undelegating/ +/ja/resources/subgraph-studio-faq/ /ja/resources/tokenomics/ /ja/sps/introduction/ /ja/sps/sps-faq/ /ja/sps/triggers/ /ja/sps/tutorial/ +/ja/subgraphs/best-practices/avoid-eth-calls/ +/ja/subgraphs/best-practices/derivedfrom/ +/ja/subgraphs/best-practices/grafting-hotfix/ +/ja/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ja/subgraphs/best-practices/pruning/ +/ja/subgraphs/best-practices/timeseries/ /ja/subgraphs/billing/ /ja/subgraphs/cookbook/arweave/ -/ja/subgraphs/cookbook/avoid-eth-calls/ /ja/subgraphs/cookbook/cosmos/ -/ja/subgraphs/cookbook/derivedfrom/ /ja/subgraphs/cookbook/enums/ -/ja/subgraphs/cookbook/grafting-hotfix/ /ja/subgraphs/cookbook/grafting/ -/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ja/subgraphs/cookbook/near/ /ja/subgraphs/cookbook/polymarket/ -/ja/subgraphs/cookbook/pruning/ /ja/subgraphs/cookbook/secure-api-keys-nextjs/ /ja/subgraphs/cookbook/subgraph-debug-forking/ 
/ja/subgraphs/cookbook/subgraph-uncrashable/ -/ja/subgraphs/cookbook/timeseries/ /ja/subgraphs/cookbook/transfer-to-the-graph/ /ja/subgraphs/developing/creating/advanced/ /ja/subgraphs/developing/creating/assemblyscript-mappings/ @@ -742,7 +743,6 @@ /ja/subgraphs/developing/creating/subgraph-manifest/ /ja/subgraphs/developing/creating/unit-testing-framework/ /ja/subgraphs/developing/deploying/multiple-networks/ -/ja/subgraphs/developing/deploying/subgraph-studio-faq/ /ja/subgraphs/developing/deploying/using-subgraph-studio/ /ja/subgraphs/developing/developer-faq/ /ja/subgraphs/developing/introduction/ @@ -787,32 +787,33 @@ /ko/indexing/tooling/graphcast/ /ko/resources/benefits/ /ko/resources/glossary/ -/ko/resources/release-notes/assemblyscript-migration-guide/ -/ko/resources/release-notes/graphql-validations-migration-guide/ +/ko/resources/migration-guides/assemblyscript-migration-guide/ +/ko/resources/migration-guides/graphql-validations-migration-guide/ /ko/resources/roles/curating/ /ko/resources/roles/delegating/delegating/ /ko/resources/roles/delegating/undelegating/ +/ko/resources/subgraph-studio-faq/ /ko/resources/tokenomics/ /ko/sps/introduction/ /ko/sps/sps-faq/ /ko/sps/triggers/ /ko/sps/tutorial/ +/ko/subgraphs/best-practices/avoid-eth-calls/ +/ko/subgraphs/best-practices/derivedfrom/ +/ko/subgraphs/best-practices/grafting-hotfix/ +/ko/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ko/subgraphs/best-practices/pruning/ +/ko/subgraphs/best-practices/timeseries/ /ko/subgraphs/billing/ /ko/subgraphs/cookbook/arweave/ -/ko/subgraphs/cookbook/avoid-eth-calls/ /ko/subgraphs/cookbook/cosmos/ -/ko/subgraphs/cookbook/derivedfrom/ /ko/subgraphs/cookbook/enums/ -/ko/subgraphs/cookbook/grafting-hotfix/ /ko/subgraphs/cookbook/grafting/ -/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ko/subgraphs/cookbook/near/ /ko/subgraphs/cookbook/polymarket/ -/ko/subgraphs/cookbook/pruning/ /ko/subgraphs/cookbook/secure-api-keys-nextjs/ 
/ko/subgraphs/cookbook/subgraph-debug-forking/ /ko/subgraphs/cookbook/subgraph-uncrashable/ -/ko/subgraphs/cookbook/timeseries/ /ko/subgraphs/cookbook/transfer-to-the-graph/ /ko/subgraphs/developing/creating/advanced/ /ko/subgraphs/developing/creating/assemblyscript-mappings/ @@ -826,7 +827,6 @@ /ko/subgraphs/developing/creating/subgraph-manifest/ /ko/subgraphs/developing/creating/unit-testing-framework/ /ko/subgraphs/developing/deploying/multiple-networks/ -/ko/subgraphs/developing/deploying/subgraph-studio-faq/ /ko/subgraphs/developing/deploying/using-subgraph-studio/ /ko/subgraphs/developing/developer-faq/ /ko/subgraphs/developing/introduction/ @@ -873,32 +873,33 @@ /mr/indexing/tooling/graphcast/ /mr/resources/benefits/ /mr/resources/glossary/ -/mr/resources/release-notes/assemblyscript-migration-guide/ -/mr/resources/release-notes/graphql-validations-migration-guide/ +/mr/resources/migration-guides/assemblyscript-migration-guide/ +/mr/resources/migration-guides/graphql-validations-migration-guide/ /mr/resources/roles/curating/ /mr/resources/roles/delegating/delegating/ /mr/resources/roles/delegating/undelegating/ +/mr/resources/subgraph-studio-faq/ /mr/resources/tokenomics/ /mr/sps/introduction/ /mr/sps/sps-faq/ /mr/sps/triggers/ /mr/sps/tutorial/ +/mr/subgraphs/best-practices/avoid-eth-calls/ +/mr/subgraphs/best-practices/derivedfrom/ +/mr/subgraphs/best-practices/grafting-hotfix/ +/mr/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/mr/subgraphs/best-practices/pruning/ +/mr/subgraphs/best-practices/timeseries/ /mr/subgraphs/billing/ /mr/subgraphs/cookbook/arweave/ -/mr/subgraphs/cookbook/avoid-eth-calls/ /mr/subgraphs/cookbook/cosmos/ -/mr/subgraphs/cookbook/derivedfrom/ /mr/subgraphs/cookbook/enums/ -/mr/subgraphs/cookbook/grafting-hotfix/ /mr/subgraphs/cookbook/grafting/ -/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /mr/subgraphs/cookbook/near/ /mr/subgraphs/cookbook/polymarket/ -/mr/subgraphs/cookbook/pruning/ 
/mr/subgraphs/cookbook/secure-api-keys-nextjs/ /mr/subgraphs/cookbook/subgraph-debug-forking/ /mr/subgraphs/cookbook/subgraph-uncrashable/ -/mr/subgraphs/cookbook/timeseries/ /mr/subgraphs/cookbook/transfer-to-the-graph/ /mr/subgraphs/developing/creating/advanced/ /mr/subgraphs/developing/creating/assemblyscript-mappings/ @@ -912,7 +913,6 @@ /mr/subgraphs/developing/creating/subgraph-manifest/ /mr/subgraphs/developing/creating/unit-testing-framework/ /mr/subgraphs/developing/deploying/multiple-networks/ -/mr/subgraphs/developing/deploying/subgraph-studio-faq/ /mr/subgraphs/developing/deploying/using-subgraph-studio/ /mr/subgraphs/developing/developer-faq/ /mr/subgraphs/developing/introduction/ @@ -957,32 +957,33 @@ /nl/indexing/tooling/graphcast/ /nl/resources/benefits/ /nl/resources/glossary/ -/nl/resources/release-notes/assemblyscript-migration-guide/ -/nl/resources/release-notes/graphql-validations-migration-guide/ +/nl/resources/migration-guides/assemblyscript-migration-guide/ +/nl/resources/migration-guides/graphql-validations-migration-guide/ /nl/resources/roles/curating/ /nl/resources/roles/delegating/delegating/ /nl/resources/roles/delegating/undelegating/ +/nl/resources/subgraph-studio-faq/ /nl/resources/tokenomics/ /nl/sps/introduction/ /nl/sps/sps-faq/ /nl/sps/triggers/ /nl/sps/tutorial/ +/nl/subgraphs/best-practices/avoid-eth-calls/ +/nl/subgraphs/best-practices/derivedfrom/ +/nl/subgraphs/best-practices/grafting-hotfix/ +/nl/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/nl/subgraphs/best-practices/pruning/ +/nl/subgraphs/best-practices/timeseries/ /nl/subgraphs/billing/ /nl/subgraphs/cookbook/arweave/ -/nl/subgraphs/cookbook/avoid-eth-calls/ /nl/subgraphs/cookbook/cosmos/ -/nl/subgraphs/cookbook/derivedfrom/ /nl/subgraphs/cookbook/enums/ -/nl/subgraphs/cookbook/grafting-hotfix/ /nl/subgraphs/cookbook/grafting/ -/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /nl/subgraphs/cookbook/near/ /nl/subgraphs/cookbook/polymarket/ 
-/nl/subgraphs/cookbook/pruning/ /nl/subgraphs/cookbook/secure-api-keys-nextjs/ /nl/subgraphs/cookbook/subgraph-debug-forking/ /nl/subgraphs/cookbook/subgraph-uncrashable/ -/nl/subgraphs/cookbook/timeseries/ /nl/subgraphs/cookbook/transfer-to-the-graph/ /nl/subgraphs/developing/creating/advanced/ /nl/subgraphs/developing/creating/assemblyscript-mappings/ @@ -996,7 +997,6 @@ /nl/subgraphs/developing/creating/subgraph-manifest/ /nl/subgraphs/developing/creating/unit-testing-framework/ /nl/subgraphs/developing/deploying/multiple-networks/ -/nl/subgraphs/developing/deploying/subgraph-studio-faq/ /nl/subgraphs/developing/deploying/using-subgraph-studio/ /nl/subgraphs/developing/developer-faq/ /nl/subgraphs/developing/introduction/ @@ -1041,32 +1041,33 @@ /pl/indexing/tooling/graphcast/ /pl/resources/benefits/ /pl/resources/glossary/ -/pl/resources/release-notes/assemblyscript-migration-guide/ -/pl/resources/release-notes/graphql-validations-migration-guide/ +/pl/resources/migration-guides/assemblyscript-migration-guide/ +/pl/resources/migration-guides/graphql-validations-migration-guide/ /pl/resources/roles/curating/ /pl/resources/roles/delegating/delegating/ /pl/resources/roles/delegating/undelegating/ +/pl/resources/subgraph-studio-faq/ /pl/resources/tokenomics/ /pl/sps/introduction/ /pl/sps/sps-faq/ /pl/sps/triggers/ /pl/sps/tutorial/ +/pl/subgraphs/best-practices/avoid-eth-calls/ +/pl/subgraphs/best-practices/derivedfrom/ +/pl/subgraphs/best-practices/grafting-hotfix/ +/pl/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/pl/subgraphs/best-practices/pruning/ +/pl/subgraphs/best-practices/timeseries/ /pl/subgraphs/billing/ /pl/subgraphs/cookbook/arweave/ -/pl/subgraphs/cookbook/avoid-eth-calls/ /pl/subgraphs/cookbook/cosmos/ -/pl/subgraphs/cookbook/derivedfrom/ /pl/subgraphs/cookbook/enums/ -/pl/subgraphs/cookbook/grafting-hotfix/ /pl/subgraphs/cookbook/grafting/ -/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /pl/subgraphs/cookbook/near/ 
/pl/subgraphs/cookbook/polymarket/ -/pl/subgraphs/cookbook/pruning/ /pl/subgraphs/cookbook/secure-api-keys-nextjs/ /pl/subgraphs/cookbook/subgraph-debug-forking/ /pl/subgraphs/cookbook/subgraph-uncrashable/ -/pl/subgraphs/cookbook/timeseries/ /pl/subgraphs/cookbook/transfer-to-the-graph/ /pl/subgraphs/developing/creating/advanced/ /pl/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1080,7 +1081,6 @@ /pl/subgraphs/developing/creating/subgraph-manifest/ /pl/subgraphs/developing/creating/unit-testing-framework/ /pl/subgraphs/developing/deploying/multiple-networks/ -/pl/subgraphs/developing/deploying/subgraph-studio-faq/ /pl/subgraphs/developing/deploying/using-subgraph-studio/ /pl/subgraphs/developing/developer-faq/ /pl/subgraphs/developing/introduction/ @@ -1127,32 +1127,33 @@ /pt/indexing/tooling/graphcast/ /pt/resources/benefits/ /pt/resources/glossary/ -/pt/resources/release-notes/assemblyscript-migration-guide/ -/pt/resources/release-notes/graphql-validations-migration-guide/ +/pt/resources/migration-guides/assemblyscript-migration-guide/ +/pt/resources/migration-guides/graphql-validations-migration-guide/ /pt/resources/roles/curating/ /pt/resources/roles/delegating/delegating/ /pt/resources/roles/delegating/undelegating/ +/pt/resources/subgraph-studio-faq/ /pt/resources/tokenomics/ /pt/sps/introduction/ /pt/sps/sps-faq/ /pt/sps/triggers/ /pt/sps/tutorial/ +/pt/subgraphs/best-practices/avoid-eth-calls/ +/pt/subgraphs/best-practices/derivedfrom/ +/pt/subgraphs/best-practices/grafting-hotfix/ +/pt/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/pt/subgraphs/best-practices/pruning/ +/pt/subgraphs/best-practices/timeseries/ /pt/subgraphs/billing/ /pt/subgraphs/cookbook/arweave/ -/pt/subgraphs/cookbook/avoid-eth-calls/ /pt/subgraphs/cookbook/cosmos/ -/pt/subgraphs/cookbook/derivedfrom/ /pt/subgraphs/cookbook/enums/ -/pt/subgraphs/cookbook/grafting-hotfix/ /pt/subgraphs/cookbook/grafting/ -/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids/ 
/pt/subgraphs/cookbook/near/ /pt/subgraphs/cookbook/polymarket/ -/pt/subgraphs/cookbook/pruning/ /pt/subgraphs/cookbook/secure-api-keys-nextjs/ /pt/subgraphs/cookbook/subgraph-debug-forking/ /pt/subgraphs/cookbook/subgraph-uncrashable/ -/pt/subgraphs/cookbook/timeseries/ /pt/subgraphs/cookbook/transfer-to-the-graph/ /pt/subgraphs/developing/creating/advanced/ /pt/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1166,7 +1167,6 @@ /pt/subgraphs/developing/creating/subgraph-manifest/ /pt/subgraphs/developing/creating/unit-testing-framework/ /pt/subgraphs/developing/deploying/multiple-networks/ -/pt/subgraphs/developing/deploying/subgraph-studio-faq/ /pt/subgraphs/developing/deploying/using-subgraph-studio/ /pt/subgraphs/developing/developer-faq/ /pt/subgraphs/developing/introduction/ @@ -1211,32 +1211,33 @@ /ro/indexing/tooling/graphcast/ /ro/resources/benefits/ /ro/resources/glossary/ -/ro/resources/release-notes/assemblyscript-migration-guide/ -/ro/resources/release-notes/graphql-validations-migration-guide/ +/ro/resources/migration-guides/assemblyscript-migration-guide/ +/ro/resources/migration-guides/graphql-validations-migration-guide/ /ro/resources/roles/curating/ /ro/resources/roles/delegating/delegating/ /ro/resources/roles/delegating/undelegating/ +/ro/resources/subgraph-studio-faq/ /ro/resources/tokenomics/ /ro/sps/introduction/ /ro/sps/sps-faq/ /ro/sps/triggers/ /ro/sps/tutorial/ +/ro/subgraphs/best-practices/avoid-eth-calls/ +/ro/subgraphs/best-practices/derivedfrom/ +/ro/subgraphs/best-practices/grafting-hotfix/ +/ro/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ro/subgraphs/best-practices/pruning/ +/ro/subgraphs/best-practices/timeseries/ /ro/subgraphs/billing/ /ro/subgraphs/cookbook/arweave/ -/ro/subgraphs/cookbook/avoid-eth-calls/ /ro/subgraphs/cookbook/cosmos/ -/ro/subgraphs/cookbook/derivedfrom/ /ro/subgraphs/cookbook/enums/ -/ro/subgraphs/cookbook/grafting-hotfix/ /ro/subgraphs/cookbook/grafting/ 
-/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ro/subgraphs/cookbook/near/ /ro/subgraphs/cookbook/polymarket/ -/ro/subgraphs/cookbook/pruning/ /ro/subgraphs/cookbook/secure-api-keys-nextjs/ /ro/subgraphs/cookbook/subgraph-debug-forking/ /ro/subgraphs/cookbook/subgraph-uncrashable/ -/ro/subgraphs/cookbook/timeseries/ /ro/subgraphs/cookbook/transfer-to-the-graph/ /ro/subgraphs/developing/creating/advanced/ /ro/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1250,7 +1251,6 @@ /ro/subgraphs/developing/creating/subgraph-manifest/ /ro/subgraphs/developing/creating/unit-testing-framework/ /ro/subgraphs/developing/deploying/multiple-networks/ -/ro/subgraphs/developing/deploying/subgraph-studio-faq/ /ro/subgraphs/developing/deploying/using-subgraph-studio/ /ro/subgraphs/developing/developer-faq/ /ro/subgraphs/developing/introduction/ @@ -1297,32 +1297,33 @@ /ru/indexing/tooling/graphcast/ /ru/resources/benefits/ /ru/resources/glossary/ -/ru/resources/release-notes/assemblyscript-migration-guide/ -/ru/resources/release-notes/graphql-validations-migration-guide/ +/ru/resources/migration-guides/assemblyscript-migration-guide/ +/ru/resources/migration-guides/graphql-validations-migration-guide/ /ru/resources/roles/curating/ /ru/resources/roles/delegating/delegating/ /ru/resources/roles/delegating/undelegating/ +/ru/resources/subgraph-studio-faq/ /ru/resources/tokenomics/ /ru/sps/introduction/ /ru/sps/sps-faq/ /ru/sps/triggers/ /ru/sps/tutorial/ +/ru/subgraphs/best-practices/avoid-eth-calls/ +/ru/subgraphs/best-practices/derivedfrom/ +/ru/subgraphs/best-practices/grafting-hotfix/ +/ru/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ru/subgraphs/best-practices/pruning/ +/ru/subgraphs/best-practices/timeseries/ /ru/subgraphs/billing/ /ru/subgraphs/cookbook/arweave/ -/ru/subgraphs/cookbook/avoid-eth-calls/ /ru/subgraphs/cookbook/cosmos/ -/ru/subgraphs/cookbook/derivedfrom/ /ru/subgraphs/cookbook/enums/ -/ru/subgraphs/cookbook/grafting-hotfix/ 
/ru/subgraphs/cookbook/grafting/ -/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ru/subgraphs/cookbook/near/ /ru/subgraphs/cookbook/polymarket/ -/ru/subgraphs/cookbook/pruning/ /ru/subgraphs/cookbook/secure-api-keys-nextjs/ /ru/subgraphs/cookbook/subgraph-debug-forking/ /ru/subgraphs/cookbook/subgraph-uncrashable/ -/ru/subgraphs/cookbook/timeseries/ /ru/subgraphs/cookbook/transfer-to-the-graph/ /ru/subgraphs/developing/creating/advanced/ /ru/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1336,7 +1337,6 @@ /ru/subgraphs/developing/creating/subgraph-manifest/ /ru/subgraphs/developing/creating/unit-testing-framework/ /ru/subgraphs/developing/deploying/multiple-networks/ -/ru/subgraphs/developing/deploying/subgraph-studio-faq/ /ru/subgraphs/developing/deploying/using-subgraph-studio/ /ru/subgraphs/developing/developer-faq/ /ru/subgraphs/developing/introduction/ @@ -1383,32 +1383,33 @@ /sv/indexing/tooling/graphcast/ /sv/resources/benefits/ /sv/resources/glossary/ -/sv/resources/release-notes/assemblyscript-migration-guide/ -/sv/resources/release-notes/graphql-validations-migration-guide/ +/sv/resources/migration-guides/assemblyscript-migration-guide/ +/sv/resources/migration-guides/graphql-validations-migration-guide/ /sv/resources/roles/curating/ /sv/resources/roles/delegating/delegating/ /sv/resources/roles/delegating/undelegating/ +/sv/resources/subgraph-studio-faq/ /sv/resources/tokenomics/ /sv/sps/introduction/ /sv/sps/sps-faq/ /sv/sps/triggers/ /sv/sps/tutorial/ +/sv/subgraphs/best-practices/avoid-eth-calls/ +/sv/subgraphs/best-practices/derivedfrom/ +/sv/subgraphs/best-practices/grafting-hotfix/ +/sv/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/sv/subgraphs/best-practices/pruning/ +/sv/subgraphs/best-practices/timeseries/ /sv/subgraphs/billing/ /sv/subgraphs/cookbook/arweave/ -/sv/subgraphs/cookbook/avoid-eth-calls/ /sv/subgraphs/cookbook/cosmos/ -/sv/subgraphs/cookbook/derivedfrom/ /sv/subgraphs/cookbook/enums/ 
-/sv/subgraphs/cookbook/grafting-hotfix/ /sv/subgraphs/cookbook/grafting/ -/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /sv/subgraphs/cookbook/near/ /sv/subgraphs/cookbook/polymarket/ -/sv/subgraphs/cookbook/pruning/ /sv/subgraphs/cookbook/secure-api-keys-nextjs/ /sv/subgraphs/cookbook/subgraph-debug-forking/ /sv/subgraphs/cookbook/subgraph-uncrashable/ -/sv/subgraphs/cookbook/timeseries/ /sv/subgraphs/cookbook/transfer-to-the-graph/ /sv/subgraphs/developing/creating/advanced/ /sv/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1422,7 +1423,6 @@ /sv/subgraphs/developing/creating/subgraph-manifest/ /sv/subgraphs/developing/creating/unit-testing-framework/ /sv/subgraphs/developing/deploying/multiple-networks/ -/sv/subgraphs/developing/deploying/subgraph-studio-faq/ /sv/subgraphs/developing/deploying/using-subgraph-studio/ /sv/subgraphs/developing/developer-faq/ /sv/subgraphs/developing/introduction/ @@ -1469,32 +1469,33 @@ /tr/indexing/tooling/graphcast/ /tr/resources/benefits/ /tr/resources/glossary/ -/tr/resources/release-notes/assemblyscript-migration-guide/ -/tr/resources/release-notes/graphql-validations-migration-guide/ +/tr/resources/migration-guides/assemblyscript-migration-guide/ +/tr/resources/migration-guides/graphql-validations-migration-guide/ /tr/resources/roles/curating/ /tr/resources/roles/delegating/delegating/ /tr/resources/roles/delegating/undelegating/ +/tr/resources/subgraph-studio-faq/ /tr/resources/tokenomics/ /tr/sps/introduction/ /tr/sps/sps-faq/ /tr/sps/triggers/ /tr/sps/tutorial/ +/tr/subgraphs/best-practices/avoid-eth-calls/ +/tr/subgraphs/best-practices/derivedfrom/ +/tr/subgraphs/best-practices/grafting-hotfix/ +/tr/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/tr/subgraphs/best-practices/pruning/ +/tr/subgraphs/best-practices/timeseries/ /tr/subgraphs/billing/ /tr/subgraphs/cookbook/arweave/ -/tr/subgraphs/cookbook/avoid-eth-calls/ /tr/subgraphs/cookbook/cosmos/ -/tr/subgraphs/cookbook/derivedfrom/ 
/tr/subgraphs/cookbook/enums/ -/tr/subgraphs/cookbook/grafting-hotfix/ /tr/subgraphs/cookbook/grafting/ -/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /tr/subgraphs/cookbook/near/ /tr/subgraphs/cookbook/polymarket/ -/tr/subgraphs/cookbook/pruning/ /tr/subgraphs/cookbook/secure-api-keys-nextjs/ /tr/subgraphs/cookbook/subgraph-debug-forking/ /tr/subgraphs/cookbook/subgraph-uncrashable/ -/tr/subgraphs/cookbook/timeseries/ /tr/subgraphs/cookbook/transfer-to-the-graph/ /tr/subgraphs/developing/creating/advanced/ /tr/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1508,7 +1509,6 @@ /tr/subgraphs/developing/creating/subgraph-manifest/ /tr/subgraphs/developing/creating/unit-testing-framework/ /tr/subgraphs/developing/deploying/multiple-networks/ -/tr/subgraphs/developing/deploying/subgraph-studio-faq/ /tr/subgraphs/developing/deploying/using-subgraph-studio/ /tr/subgraphs/developing/developer-faq/ /tr/subgraphs/developing/introduction/ @@ -1553,32 +1553,33 @@ /uk/indexing/tooling/graphcast/ /uk/resources/benefits/ /uk/resources/glossary/ -/uk/resources/release-notes/assemblyscript-migration-guide/ -/uk/resources/release-notes/graphql-validations-migration-guide/ +/uk/resources/migration-guides/assemblyscript-migration-guide/ +/uk/resources/migration-guides/graphql-validations-migration-guide/ /uk/resources/roles/curating/ /uk/resources/roles/delegating/delegating/ /uk/resources/roles/delegating/undelegating/ +/uk/resources/subgraph-studio-faq/ /uk/resources/tokenomics/ /uk/sps/introduction/ /uk/sps/sps-faq/ /uk/sps/triggers/ /uk/sps/tutorial/ +/uk/subgraphs/best-practices/avoid-eth-calls/ +/uk/subgraphs/best-practices/derivedfrom/ +/uk/subgraphs/best-practices/grafting-hotfix/ +/uk/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/uk/subgraphs/best-practices/pruning/ +/uk/subgraphs/best-practices/timeseries/ /uk/subgraphs/billing/ /uk/subgraphs/cookbook/arweave/ -/uk/subgraphs/cookbook/avoid-eth-calls/ /uk/subgraphs/cookbook/cosmos/ 
-/uk/subgraphs/cookbook/derivedfrom/ /uk/subgraphs/cookbook/enums/ -/uk/subgraphs/cookbook/grafting-hotfix/ /uk/subgraphs/cookbook/grafting/ -/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /uk/subgraphs/cookbook/near/ /uk/subgraphs/cookbook/polymarket/ -/uk/subgraphs/cookbook/pruning/ /uk/subgraphs/cookbook/secure-api-keys-nextjs/ /uk/subgraphs/cookbook/subgraph-debug-forking/ /uk/subgraphs/cookbook/subgraph-uncrashable/ -/uk/subgraphs/cookbook/timeseries/ /uk/subgraphs/cookbook/transfer-to-the-graph/ /uk/subgraphs/developing/creating/advanced/ /uk/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1592,7 +1593,6 @@ /uk/subgraphs/developing/creating/subgraph-manifest/ /uk/subgraphs/developing/creating/unit-testing-framework/ /uk/subgraphs/developing/deploying/multiple-networks/ -/uk/subgraphs/developing/deploying/subgraph-studio-faq/ /uk/subgraphs/developing/deploying/using-subgraph-studio/ /uk/subgraphs/developing/developer-faq/ /uk/subgraphs/developing/introduction/ @@ -1639,32 +1639,33 @@ /ur/indexing/tooling/graphcast/ /ur/resources/benefits/ /ur/resources/glossary/ -/ur/resources/release-notes/assemblyscript-migration-guide/ -/ur/resources/release-notes/graphql-validations-migration-guide/ +/ur/resources/migration-guides/assemblyscript-migration-guide/ +/ur/resources/migration-guides/graphql-validations-migration-guide/ /ur/resources/roles/curating/ /ur/resources/roles/delegating/delegating/ /ur/resources/roles/delegating/undelegating/ +/ur/resources/subgraph-studio-faq/ /ur/resources/tokenomics/ /ur/sps/introduction/ /ur/sps/sps-faq/ /ur/sps/triggers/ /ur/sps/tutorial/ +/ur/subgraphs/best-practices/avoid-eth-calls/ +/ur/subgraphs/best-practices/derivedfrom/ +/ur/subgraphs/best-practices/grafting-hotfix/ +/ur/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/ur/subgraphs/best-practices/pruning/ +/ur/subgraphs/best-practices/timeseries/ /ur/subgraphs/billing/ /ur/subgraphs/cookbook/arweave/ -/ur/subgraphs/cookbook/avoid-eth-calls/ 
/ur/subgraphs/cookbook/cosmos/ -/ur/subgraphs/cookbook/derivedfrom/ /ur/subgraphs/cookbook/enums/ -/ur/subgraphs/cookbook/grafting-hotfix/ /ur/subgraphs/cookbook/grafting/ -/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /ur/subgraphs/cookbook/near/ /ur/subgraphs/cookbook/polymarket/ -/ur/subgraphs/cookbook/pruning/ /ur/subgraphs/cookbook/secure-api-keys-nextjs/ /ur/subgraphs/cookbook/subgraph-debug-forking/ /ur/subgraphs/cookbook/subgraph-uncrashable/ -/ur/subgraphs/cookbook/timeseries/ /ur/subgraphs/cookbook/transfer-to-the-graph/ /ur/subgraphs/developing/creating/advanced/ /ur/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1678,7 +1679,6 @@ /ur/subgraphs/developing/creating/subgraph-manifest/ /ur/subgraphs/developing/creating/unit-testing-framework/ /ur/subgraphs/developing/deploying/multiple-networks/ -/ur/subgraphs/developing/deploying/subgraph-studio-faq/ /ur/subgraphs/developing/deploying/using-subgraph-studio/ /ur/subgraphs/developing/developer-faq/ /ur/subgraphs/developing/introduction/ @@ -1723,32 +1723,33 @@ /vi/indexing/tooling/graphcast/ /vi/resources/benefits/ /vi/resources/glossary/ -/vi/resources/release-notes/assemblyscript-migration-guide/ -/vi/resources/release-notes/graphql-validations-migration-guide/ +/vi/resources/migration-guides/assemblyscript-migration-guide/ +/vi/resources/migration-guides/graphql-validations-migration-guide/ /vi/resources/roles/curating/ /vi/resources/roles/delegating/delegating/ /vi/resources/roles/delegating/undelegating/ +/vi/resources/subgraph-studio-faq/ /vi/resources/tokenomics/ /vi/sps/introduction/ /vi/sps/sps-faq/ /vi/sps/triggers/ /vi/sps/tutorial/ +/vi/subgraphs/best-practices/avoid-eth-calls/ +/vi/subgraphs/best-practices/derivedfrom/ +/vi/subgraphs/best-practices/grafting-hotfix/ +/vi/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/vi/subgraphs/best-practices/pruning/ +/vi/subgraphs/best-practices/timeseries/ /vi/subgraphs/billing/ /vi/subgraphs/cookbook/arweave/ 
-/vi/subgraphs/cookbook/avoid-eth-calls/ /vi/subgraphs/cookbook/cosmos/ -/vi/subgraphs/cookbook/derivedfrom/ /vi/subgraphs/cookbook/enums/ -/vi/subgraphs/cookbook/grafting-hotfix/ /vi/subgraphs/cookbook/grafting/ -/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /vi/subgraphs/cookbook/near/ /vi/subgraphs/cookbook/polymarket/ -/vi/subgraphs/cookbook/pruning/ /vi/subgraphs/cookbook/secure-api-keys-nextjs/ /vi/subgraphs/cookbook/subgraph-debug-forking/ /vi/subgraphs/cookbook/subgraph-uncrashable/ -/vi/subgraphs/cookbook/timeseries/ /vi/subgraphs/cookbook/transfer-to-the-graph/ /vi/subgraphs/developing/creating/advanced/ /vi/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1762,7 +1763,6 @@ /vi/subgraphs/developing/creating/subgraph-manifest/ /vi/subgraphs/developing/creating/unit-testing-framework/ /vi/subgraphs/developing/deploying/multiple-networks/ -/vi/subgraphs/developing/deploying/subgraph-studio-faq/ /vi/subgraphs/developing/deploying/using-subgraph-studio/ /vi/subgraphs/developing/developer-faq/ /vi/subgraphs/developing/introduction/ @@ -1809,32 +1809,33 @@ /zh/indexing/tooling/graphcast/ /zh/resources/benefits/ /zh/resources/glossary/ -/zh/resources/release-notes/assemblyscript-migration-guide/ -/zh/resources/release-notes/graphql-validations-migration-guide/ +/zh/resources/migration-guides/assemblyscript-migration-guide/ +/zh/resources/migration-guides/graphql-validations-migration-guide/ /zh/resources/roles/curating/ /zh/resources/roles/delegating/delegating/ /zh/resources/roles/delegating/undelegating/ +/zh/resources/subgraph-studio-faq/ /zh/resources/tokenomics/ /zh/sps/introduction/ /zh/sps/sps-faq/ /zh/sps/triggers/ /zh/sps/tutorial/ +/zh/subgraphs/best-practices/avoid-eth-calls/ +/zh/subgraphs/best-practices/derivedfrom/ +/zh/subgraphs/best-practices/grafting-hotfix/ +/zh/subgraphs/best-practices/immutable-entities-bytes-as-ids/ +/zh/subgraphs/best-practices/pruning/ +/zh/subgraphs/best-practices/timeseries/ /zh/subgraphs/billing/ 
/zh/subgraphs/cookbook/arweave/ -/zh/subgraphs/cookbook/avoid-eth-calls/ /zh/subgraphs/cookbook/cosmos/ -/zh/subgraphs/cookbook/derivedfrom/ /zh/subgraphs/cookbook/enums/ -/zh/subgraphs/cookbook/grafting-hotfix/ /zh/subgraphs/cookbook/grafting/ -/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids/ /zh/subgraphs/cookbook/near/ /zh/subgraphs/cookbook/polymarket/ -/zh/subgraphs/cookbook/pruning/ /zh/subgraphs/cookbook/secure-api-keys-nextjs/ /zh/subgraphs/cookbook/subgraph-debug-forking/ /zh/subgraphs/cookbook/subgraph-uncrashable/ -/zh/subgraphs/cookbook/timeseries/ /zh/subgraphs/cookbook/transfer-to-the-graph/ /zh/subgraphs/developing/creating/advanced/ /zh/subgraphs/developing/creating/assemblyscript-mappings/ @@ -1848,7 +1849,6 @@ /zh/subgraphs/developing/creating/subgraph-manifest/ /zh/subgraphs/developing/creating/unit-testing-framework/ /zh/subgraphs/developing/deploying/multiple-networks/ -/zh/subgraphs/developing/deploying/subgraph-studio-faq/ /zh/subgraphs/developing/deploying/using-subgraph-studio/ /zh/subgraphs/developing/developer-faq/ /zh/subgraphs/developing/introduction/ diff --git a/website/src/Layout.tsx b/website/src/Layout.tsx index 029d8c9c9583..618db31baffd 100644 --- a/website/src/Layout.tsx +++ b/website/src/Layout.tsx @@ -318,7 +318,7 @@ export default function Layout({ pageOpts, children }: NextraThemeLayoutProps ملاحظة: اعتبارًا من `0.24.0` ، يمكن أن يدعم `grapg-node` كلا الإصدارين ، اعتمادًا على `apiVersion` المحدد في Subgraph manifest. 
- -## مميزات - -### وظائف جديدة - -- يمكن الآن إنشاء `TypedArray` من `ArrayBuffer` باستخدام [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) جديد -- دوال المكتبة القياسية الجديدة`String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- تمت إضافة دعم لـ x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- تمت إضافة `StaticArray` متغير مصفوفة أكثر كفاءة([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- تمت إضافة`Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- تم تنفيذ`radix` argument على `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- دعم إضافي للفواصل في floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- دعم إضافي لدوال الفئة الأولى ([ v0.14.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- إضافة البناء: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- تنفيذ `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- دعم إضافي لقوالب literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- أضف`encodeURI(Component)` و `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- أضافة `toString`, `toDateString` و `toTimeString` إلى `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- أضافة `toUTCString` ل `Date` 
([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- أضافة `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### التحسينات - -- دوال `Math` مثل `exp`, `exp2`, `log`, `log2` و `pow` تم استبدالها بمتغيرات أسرع ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1))أكثر تحسينا -- تخزين المزيد من الوصول للحقول في std Map و Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- قم بتحسين قدرات اثنين في `ipow32 / 64` ([ v0.18.2 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### آخر - -- يمكن الآن استنتاج نوع array literal من محتوياتها([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- تم تحديث stdlib إلى Unicode 13.0.0([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## كيف تقوم بالترقية؟ - -1. تغيير `apiVersion` Mappings الخاص بك في `subgraph.yaml` إلى `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. قم بتحديث `graph-cli` الذي تستخدمه إلى `latest` عن طريق تشغيل: - -```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest - -# or in your subgraph if you have it as a dev dependency -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. افعل الشيء نفسه مع `graph-ts` ، ولكن بدلاً من التثبيت بشكل عام ، احفظه في dependencies الرئيسية: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. اتبع بقية الدليل لاصلاح التغييرات الهامة في اللغة. -5. قم بتشغيل `codegen` و `deploy` مرة أخرى. - -## تغييرات هامة - -### Nullability - -في الإصدار الأقدم من AssemblyScript ، يمكنك إنشاء كود مثل هذا: - -```typescript -function load(): Value | null { ... 
} - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -ولكن في الإصدار الأحدث ، نظرًا لأن القيمة nullable ، فإنها تتطلب منك التحقق ، مثل هذا: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -أو إجباره على هذا النحو: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -إذا لم تكن متأكدا من اختيارك ، فنحن نوصي دائما باستخدام الإصدار الآمن. إذا كانت القيمة غير موجودة ، فقد ترغب في القيام بعبارة if المبكرة مع قيمة راجعة في معالج الـ subgraph الخاص بك. - -### Variable Shadowing - -قبل أن تتمكن من إجراء [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) وتعمل تعليمات برمجية مثل هذه: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -لكن هذا لم يعد ممكنًا الآن ، ويرجع المترجم بهذا الخطأ: - -```typescript -'ERROR TS2451: Cannot redeclare block-scoped variable 'a - -; let a = a + b - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -ستحتاج إلى إعادة تسمية المتغيرات المكررة إذا كان لديك variable shadowing. - -### مقارانات Null - -من خلال إجراء الترقية على ال Subgraph الخاص بك ، قد تحصل أحيانًا على أخطاء مثل هذه: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -لحل المشكلة يمكنك ببساطة تغيير عبارة `if` إلى شيء مثل هذا: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -الأمر نفسه ينطبق إذا كنت تفعل =! بدلاً من ==. 
- -### Casting - -كانت الطريقة الشائعة لإجراء ال Casting من قبل هي استخدام كلمة `as` ، مثل هذا: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -لكن هذا لا يعمل إلا في سيناريوهين: - -- Primitive casting (بين انواع مثل`u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting على وراثة الفئة (subclass → superclass) - -أمثلة: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: - -- Downcasting وراثة الفئة (superclass → subclass) -- بين نوعين يشتركان في فئة superclass - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -في هذه الحالة يمكنك إستخدام دالة `changetype`: - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -إذا كنت تريد فقط إزالة nullability ، فيمكنك الاستمرار في استخدام `as` (أو `variable`) ، ولكن تأكد من أنك تعرف أن القيمة لا يمكن أن تكون خالية ، وإلا فإنه سوف يتوقف. 
- -```typescript -// remove nullability -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -بالنسبة لحالة ال nullability ، نوصي بإلقاء نظرة على [ مميزة التحقق من nullability ](https://www.assemblyscript.org/basics.html#nullability-checks) ، ستجعل الكود أكثر وضوحا🙂 - -Also we've added a few more static methods in some types to ease casting, they are: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### التحقق من Nullability مع الوصول الى الخاصية - -لاستخدام [ مميزة التحقق من nullability ](https://www.assemblyscript.org/basics.html#nullability-checks) ، يمكنك استخدام عبارات `if` أو عامل التشغيل الثلاثي (`؟` و`:`) مثل هذا: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -ومع ذلك ، فإن هذا لا يعمل إلا عند تنفيذ `if` / ternary على متغير ، وليس على الوصول للخاصية ، مثل هذا: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -الذي يخرج هذا الخطأ: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -لإصلاح هذه المشكلة ، يمكنك إنشاء متغير للوصول إلى الخاصية حتى يتمكن المترجم من القيام بعملية التحقق من الـ nullability: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### التحميل الزائد للمشغل مع الوصول للخاصية - -(على سبيل المثال) إذا حاولت جمع نوع nullable (من وصول خاصية) مع نوع nullable ، فإن مترجم AssemblyScript بدلاً من إعطاء خطأ في وقت التحويل يحذر من أن إحدى القيم nullable، وبما أنه يقوم فقط بالترجمة بصمت ، فإنه يتيح الفرصة للكود للتوقف في وقت التشغيل. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -لقد فتحنا مشكلة في مترجم AssemblyScript ، ولكن في الوقت الحالي إذا أجريت هذا النوع من العمليات في Subgraph mappings ، فيجب عليك تغييرها لإجراء فحص ل null قبل ذلك. - -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### تهيئة القيمة - -إذا كان لديك أي كود مثل هذا: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -سيتم تجميعها لكنها ستتوقف في وقت التشغيل ، وهذا يحدث لأن القيمة لم تتم تهيئتها ، لذا تأكد من أن ال subgraph قد قام بتهيئة قيمها ، على النحو التالي: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -وأيضًا إذا كانت لديك خصائص ل nullable في كيان GraphQL ، مثل هذا: - -```graphql -type Total @entity { - id: Bytes! 
- amount: BigInt -} -``` - -ولديك كود مشابه لهذا: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -فستحتاج إلى التأكد من تهيئة قيمة `total.amount` ، لأنه إذا حاولت الوصول كما في السطر الأخير للمجموع ، فسوف يتعطل. لذلك إما أن تقوم بتهيئته أولاً: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -أو يمكنك فقط تغيير مخطط GraphQL الخاص بك بحيث لا تستخدم نوع nullable لهذه الخاصية ، ثم سنقوم بتهيئته على أنه صفر في الخطوة`codegen`😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! -} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### تهيئة خاصية الفئة - -إذا قمت بتصدير (export) أي فئات ذات خصائص فئات أخرى (تم تعريفها بواسطتك أو بواسطة المكتبة القياسية) مثل هذا: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -فإن المترجم سيخطئ لأنك ستحتاج إما لإضافة مُهيئ للخصائص والتي هي فئات (classes)، أو إضافة عامل التشغيل `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Array initialization - -The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -Depending on the types 
you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -To actually push at the beginning you should either, initialize the `Array` with size zero, like this: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Or you should mutate it via index: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### GraphQL schema - -This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. - -Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -You'll have to add an `!` to the member of the List type, like this: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
- -### آخر - -- تم ضبط `Map#set` و`Set#add` مع المواصفات ، راجعا بـ `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- لم تعد المصفوفة ترث من ArrayBufferView ، لكنها أصبحت متميزة الآن ([ v0.10.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- الفئات المهيئة من كائن لم يعد بإمكانها تعريف باني (constructor) لها ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- نتيجة العملية الثنائية `**` هي الآن العدد الصحيح للمقام المشترك إذا كان كلا المعاملين عددا صحيحا. في السابق كانت النتيجة float كما لو كان استدعاء `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- إجبار`NaN` إلى `false` عندما ال casting إلى`bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- عند إزاحة قيمة عدد صحيح صغير من النوع `i8`/`u8` أو `i16`/`u16` ، فإن فقط الـ 3 على التوالي لـ 4 بتات الأقل أهمية من قيمة RHS تؤثر على النتيجة ، على غرار نتيجة `i32.shl` المتأثرة فقط بالـ 5 بتات الأقل أهمية من قيمة RHS.. 
مثال: `someI8 << 8` أنتج سابقًا القيمة `0` ، ولكنه ينتج الآن `SomeI8` نظرًا لإخفاء RHS كـ `8 & 7 = 0`(3 بت) ([ v0.17.0 ](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- إصلاح خلل مقارنات السلاسل العلائقية (relational string) عندما تختلف الأحجام ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/en/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ar/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/en/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/ar/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/ar/subgraphs/_meta-titles.json b/website/src/pages/ar/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ar/subgraphs/_meta-titles.json +++ b/website/src/pages/ar/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ar/subgraphs/_meta.js b/website/src/pages/ar/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ar/subgraphs/_meta.js +++ b/website/src/pages/ar/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ar/subgraphs/best-practices/_meta.js b/website/src/pages/ar/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ar/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 
'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/en/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ar/subgraphs/best-practices/avoid-eth-calls.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/avoid-eth-calls.mdx rename to website/src/pages/ar/subgraphs/best-practices/avoid-eth-calls.mdx diff --git a/website/src/pages/en/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ar/subgraphs/best-practices/derivedfrom.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/derivedfrom.mdx rename to website/src/pages/ar/subgraphs/best-practices/derivedfrom.mdx diff --git a/website/src/pages/en/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ar/subgraphs/best-practices/grafting-hotfix.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/grafting-hotfix.mdx rename to website/src/pages/ar/subgraphs/best-practices/grafting-hotfix.mdx diff --git a/website/src/pages/en/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ar/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx rename to website/src/pages/ar/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx diff --git a/website/src/pages/en/subgraphs/cookbook/pruning.mdx b/website/src/pages/ar/subgraphs/best-practices/pruning.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/pruning.mdx rename to website/src/pages/ar/subgraphs/best-practices/pruning.mdx diff --git a/website/src/pages/en/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ar/subgraphs/best-practices/timeseries.mdx similarity index 100% rename from website/src/pages/en/subgraphs/cookbook/timeseries.mdx rename to website/src/pages/ar/subgraphs/best-practices/timeseries.mdx diff --git a/website/src/pages/ar/subgraphs/cookbook/_meta.js 
b/website/src/pages/ar/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ar/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ar/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 2bbe296c724a..000000000000 --- a/website/src/pages/ar/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### نظره عامة - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. 
**Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. 
- - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. 
- - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. 
For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## مصادر إضافية - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 03ac4f323fa4..000000000000 --- a/website/src/pages/ar/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## نظره عامة - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. 
-- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. - -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. 
They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. 
-- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ar/subgraphs/developing/deploying/_meta.js b/website/src/pages/ar/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ar/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ar/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ar/subgraphs/developing/publishing/_meta.js b/website/src/pages/ar/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ar/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ar/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ar/subgraphs/querying/_meta.js b/website/src/pages/ar/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ar/subgraphs/querying/_meta.js +++ b/website/src/pages/ar/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/cs/resources/_meta-titles.json b/website/src/pages/cs/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/cs/resources/_meta-titles.json +++ b/website/src/pages/cs/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional 
Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/cs/resources/_meta.js b/website/src/pages/cs/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/cs/resources/_meta.js +++ b/website/src/pages/cs/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/cs/resources/release-notes/_meta.js b/website/src/pages/cs/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/cs/resources/release-notes/_meta.js rename to website/src/pages/cs/resources/migration-guides/_meta.js diff --git a/website/src/pages/ko/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/cs/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 100% rename from website/src/pages/ko/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/cs/resources/migration-guides/assemblyscript-migration-guide.mdx diff --git a/website/src/pages/ar/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/cs/resources/migration-guides/graphql-validations-migration-guide.mdx similarity index 99% rename from website/src/pages/ar/resources/release-notes/graphql-validations-migration-guide.mdx rename to website/src/pages/cs/resources/migration-guides/graphql-validations-migration-guide.mdx index 4d909e8970a8..29fed533ef8c 100644 --- a/website/src/pages/ar/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/cs/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -1,5 +1,5 @@ --- -title: GraphQL Validations migration guide +title: GraphQL Validations Migration Guide --- Soon `graph-node` will support 100% coverage of the 
[GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). diff --git a/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index d1b9eb00bc04..000000000000 --- a/website/src/pages/cs/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Průvodce migrací AssemblyScript ---- - -Dosud se pro subgrafy používala jedna z [prvních verzí AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Konečně jsme přidali podporu pro [nejnovější dostupnou verzi](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 - -To umožní vývojářům podgrafů používat novější funkce jazyka AS a standardní knihovny. - -Tato příručka platí pro všechny, kteří používají `graph-cli`/`graph-ts` pod verzí `0.22.0`. Pokud již máte vyšší (nebo stejnou) verzi, používáte již verzi `0.19.10` AssemblyScript 🙂 - -> Poznámka: Od verze `0.24.0` může `graph-node` podporovat obě verze v závislosti na `apiVersion` uvedené v manifestu podgrafu. 
- -## Funkce - -### Nové funkce - -- `TypedArray`s lze nyní sestavit z `ArrayBuffer`s pomocí [nové `wrap` statické metody](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nové funkce standardní knihovny: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`a `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Přidána podpora pro x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Přidána `StaticArray`, efektivnější varianta pole ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Přidáno `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementován argument `radix` na `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Přidána podpora oddělovačů v literálech s plovoucí desetinnou čárkou ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Přidána podpora funkcí první třídy ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Přidání vestavěných: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementovat `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Přidána podpora literálních řetězců šablon ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Přidat `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Přidat `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Přidat `toUTCString` for `Date` 
([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Přidat `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Optimalizace - -- `Math` funkce jako `exp`, `exp2`, `log`, `log2` a `pow` byly nahrazeny rychlejšími variantami ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Mírná optimalizace `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Vyrovnávací paměť pro více přístupů k polím std Map a Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimalizace pro mocniny dvou v `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Jiný - -- Typ literálu pole lze nyní odvodit z jeho obsahu ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aktualizace stdlib na Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Jak provést upgrade? - -1. Změňte mapování `apiVersion` v `subgraph.yaml` na `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. Aktualizujte používaný `graph-cli` na `nejnovější` verzi spuštěním: - -```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest - -# or in your subgraph if you have it as a dev dependency -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Totéž proveďte pro `graph-ts`, ale místo globální instalace jej uložte do hlavních závislostí: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Postupujte podle zbytku příručky a opravte změny, které narušují jazyk. -5. Znovu spusťte `codegen` a `deploy`. - -## Prolomení změn - -### Nullability - -Ve starší verzi AssemblyScript bylo možné vytvořit kód takto: - -```typescript -function load(): Value | null { ... 
} - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -V novější verzi je však hodnota nulovatelná a je nutné ji zkontrolovat takto: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -Nebo si to vynuťte takto: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -Pokud si nejste jisti, kterou verzi zvolit, doporučujeme vždy použít bezpečnou verzi. Pokud hodnota neexistuje, možná budete chtít provést pouze časný příkaz if s návratem v obsluze podgrafu. - -### Proměnlivé stínování - -Dříve jste mohli udělat [stínování proměnné](https://en.wikipedia.org/wiki/Variable_shadowing) a kód jako tento by fungoval: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Nyní to však již není možné a překladač vrací tuto chybu: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Pokud jste použili stínování proměnných, musíte duplicitní proměnné přejmenovat. - -### Nulová srovnání - -Při aktualizaci podgrafu může někdy dojít k těmto chybám: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Pro vyřešení můžete jednoduše změnit příkaz `if` na něco takového: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Totéž platí, pokud místo == použijete !=. 
- -### Casting - -Dříve se běžně používalo klíčové slovo `jako`, například takto: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -To však funguje pouze ve dvou případech: - -- Primitivní casting (mezi typy jako `u8`, `i32`, `bool`; např: `let b: isize = 10; b jako usize`); -- Upcasting na dědičnost tříd (podtřída → nadtřída) - -Příklady: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -Existují dva scénáře, kdy můžete chtít provést obsazení, ale použití `jako`/`var` **není bezpečné**: - -- Downcasting při dědění tříd (nadtřída → podtřída) -- Mezi dvěma typy, které mají společnou nadtřídu - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -Pro tyto případy můžete použít funkci `changetype`: - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -Pokud chcete pouze odstranit nullability, můžete nadále používat operátor `jako` (nebo `proměnná`), ale ujistěte se, že hodnota nemůže být nulová, jinak dojde k rozbití. 
- -```typescript -// remove nullability -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Pro případ nullability doporučujeme podívat se na funkci [kontrola nullability](https://www.assemblyscript.org/basics.html#nullability-checks), díky ní bude váš kód čistší 🙂 - -Také jsme přidali několik dalších statických metod v některých typy abychom usnadnili odlévání, jsou to: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Kontrola nulovatelnosti s přístupem k vlastnostem - -Chcete-li použít funkci [kontroly nulovatelnosti](https://www.assemblyscript.org/basics.html#nullability-checks), můžete použít buď příkazy `if`, nebo ternární operátor (`?` a `:`), například takto: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -To však funguje pouze tehdy, když provádíte `if` / ternár na proměnné, nikoli na přístupu k vlastnosti, jako je tento: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -Který vypíše tuto chybu: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Chcete-li tento problém vyřešit, můžete vytvořit proměnnou pro přístup k této vlastnosti, aby překladač mohl provést kouzlo kontroly nulovatelnosti: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### Přetěžování operátorů s přístupem k vlastnostem - -Pokud se pokusíte sečíst (například) nullable typ (z přístupu k vlastnosti) s typem, který nullable není, kompilátor jazyka AssemblyScript namísto toho, aby při kompilaci zobrazil varování, že jedna z hodnot je nullable, provede tichou kompilaci, čímž se kód může za běhu pokazit. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -Otevřeli jsme kvůli tomu problém v kompilátoru jazyka AssemblyScript, ale zatím platí, že pokud provádíte tyto operace v mapování podgrafů, měli byste je změnit tak, aby se před nimi provedla kontrola null. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### Inicializace hodnot - -Pokud máte nějaký takový kód: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Zkompiluje se, ale za běhu se přeruší, což se stane, protože hodnota nebyla inicializována, takže se ujistěte, že váš podgraf inicializoval své hodnoty, například takto: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -Také pokud máte v entitě GraphQL nulovatelné vlastnosti, jako je tato: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -A máte kód podobný tomuto: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Musíte se ujistit, že jste inicializovali hodnotu `total.amount`, protože pokud se pokusíte přistupovat jako v posledním řádku pro součet, dojde k pádu. Takže ji buď nejprve inicializujte: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Nebo můžete změnit své schéma GraphQL tak, aby nepoužívalo nulovatelný typ pro tuto vlastnost, pak ji inicializujeme jako nulu v kroku `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Inicializace vlastností třídy - -Pokud exportujete třídy s vlastnostmi, které jsou jinými třídami (deklarovanými vámi nebo standardní knihovnou), jako je tento: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Překladač bude chybovat, protože buď musíte přidat inicializátor pro vlastnosti, které jsou třídami, nebo přidat operátor `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Inicializace polí - -Třída `Array` stále přijímá číslo pro inicializaci délky seznamu, ale měli byste si dát pozor, protože operace jako `.push` ve skutečnosti zvětší velikost, místo aby například přidávala na začátek: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -V závislosti na typech, které používáte, např. 
nulovatelných, a na způsobu přístupu k nim se můžete setkat s chybou běhu, jako je tato: - -``` -ERRO Handler přeskočen z důvodu selhání provádění, chyba: Mapování přerušeno na ~lib/array.ts, řádek 110, sloupec 40, se zprávou: Typ prvku musí být nulovatelný, pokud je pole děravé wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Chcete-li skutečně tlačit na začátku, měli byste buď inicializovat `Array` s velikostí nula, například takto: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Nebo byste ho měli zmutovat pomocí indexu: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### Schéma GraphQL - -Nejedná se o přímou změnu AssemblyScript, ale možná budete muset aktualizovat soubor `schema.graphql`. - -Nyní již nelze v typech definovat pole, která jsou nenulovatelnými seznamy. Pokud máte takovéto schéma: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Budete muset přidat `!` k členu typu List, například takto: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -To se změnilo kvůli rozdílům v nullability mezi verzemi AssemblyScript a souvisí to se souborem `src/generated/schema.ts` (výchozí cesta, možná jste ji změnili). 
- -### Jiný - -- Zarovnání `Map#set` a `Set#add` se specifikací, vrácení `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Pole již nedědí od ArrayBufferView, ale jsou nyní samostatná ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Třídy inicializované z objektových literálů již nemohou definovat konstruktor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Výsledkem binární operace `**` je nyní celé číslo se společným jmenovatelem, pokud jsou oba operandy celá čísla. Dříve byl výsledkem float, jako kdybyste volali `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Vynucení `NaN` na `false` při převodu na `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Při posunu malé celočíselné hodnoty typu `i8`/`u8` nebo `i16`/`u16` ovlivňují výsledek pouze 3, resp. 4 nejméně významné bity hodnoty RHS, obdobně jako výsledek `i32.shl` ovlivňuje pouze 5 nejméně významných bitů hodnoty RHS. 
Příklad: `someI8 << 8` dříve dávalo hodnotu `0`, ale nyní dává `someI8` kvůli maskování RHS jako `8 & 7 = 0` (3 bity) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Oprava chyb při porovnávání relačních řetězců při rozdílných velikostech ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/cs/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/cs/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 7f273724aff4..000000000000 --- a/website/src/pages/cs/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: Průvodce migrací na GraphQL Validace ---- - -Brzy bude `graph-node` podporovat 100% pokrytí [GraphQL Validations specifikace](https://spec.graphql.org/June2018/#sec-Validation). - -Předchozí verze `graph-node` nepodporovaly všechny validace a neposkytovaly šetrnější odpovědi - v případě nejednoznačnosti tak `graph-node` ignoroval neplatné komponenty operací GraphQL. - -Podpora ověřování GraphQL je pilířem pro nadcházející nové funkce a výkon v měřítku Síť Graph. - -Zajistí také determinismus odpovědí na dotazy, což je klíčový požadavek sítě Graf. - -**Povolení ověřování GraphQL naruší některé existující dotazy** odeslané do Grafu API. - -Chcete-li být v souladu s těmito validacemi, postupujte podle průvodce migrací. - -> ⚠️ Pokud neprovedete migraci dotazů před zavedením validací, budou vracet chyby a možná rozbijí vaše frontends/klienty. - -## Průvodce migrací - -Pomocí migračního nástroje CLI můžete najít případné problémy v operacích GraphQL a opravit je. Případně můžete aktualizovat koncový bod svého klienta GraphQL tak, aby používal koncový bod `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Testování dotazů proti tomuto koncovému bodu vám pomůže najít problémy ve vašich dotazech. 
- -> Není nutné migrovat všechny podgrafy, pokud používáte [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) nebo [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), ty již zajistí, že vaše dotazy jsou platné. - -## Migrační nástroj CLI - -**Většinu chyb při operacích GraphQL můžete najít ve své kódové základně předem.** - -Z tohoto důvodu poskytujeme hladký průběh ověřování operací GraphQL během vývoje nebo v CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) je jednoduchý nástroj CLI, který pomáhá ověřovat operace GraphQL proti danému schéma. - -### **začínáme** - -Nástroj můžete spustit následujícím způsobem: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Poznámky:** - -- Nastavte nebo nahraďte $GITHUB_USER, $SUBGRAPH_NAME příslušnými hodnotami. Jako např: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- Poskytnutá adresa URL náhledového schématu (https://api-next.thegraph.com/) je silně omezená a po přechodu všech uživatelů na novou verzi bude ukončena. **Nepoužívejte jej v produkčním provozu** -- Operace jsou identifikovány v souborech s následujícími příponami [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### Výstup CLI - -Nástroj `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI vypíše všechny chyby operací GraphQL takto: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -U každé chyby naleznete popis, cestu a pozici souboru a odkaz na příklad řešení (viz následující část). - -## Spouštění místních dotazů proti schéma náhledu - -Poskytujeme koncový bod `https://api-next.thegraph.com/`, který spouští verzi `graph-node` se zapnutými validacemi. 
- -Dotazy si můžete vyzkoušet zasláním na: - -- `https://api-next.thegraph.com/subgraphs/id/` - -nebo - -- `https://api-next.thegraph.com/subgraphs/name//` - -Chcete-li pracovat s dotazy, které byly označeny jako dotazy s chybami validace, můžete použít svůj oblíbený nástroj pro dotazy GraphQL, například Altair nebo [GraphiQL](https://cloud.hasura.io/public/graphiql), a vyzkoušet svůj dotaz. Tyto nástroje také tyto chyby označí ve svém uživatelském rozhraní, a to ještě předtím, než jej spustíte. - -## Jak řešit problémy - -Níže naleznete všechny chyby validace GraphQL, které se mohou vyskytnout u vašich stávajících operací GraphQL. - -### Proměnné, operace, fragmenty nebo argumenty jazyka GraphQL musí být jedinečné - -Použili jsme pravidla pro zajištění toho, aby operace obsahovala jedinečnou sadu proměnných GraphQL, operací, fragmentů a argumentů. - -Operace GraphQL je platná pouze tehdy, pokud neobsahuje žádnou nejednoznačnost. - -Abychom toho dosáhli, musíme zajistit, aby některé součásti operace GraphQL byly jedinečné. - -Zde je příklad několika neplatných operací, které porušují tato pravidla: - -**Duplicitní název dotazu (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Řešení:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicitní název fragmentu (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. 
-query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Řešení:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicitní název proměnné (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Řešení:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Řešení:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicitní anonymní dotaz (#LoneAnonymousOperationRule)** - -Použitím dvou anonymních operací se také poruší pravidlo `LoneAnonymousOperation` kvůli konfliktu ve struktuře odpovědi: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Řešení:_ - -```graphql -query { - someField - otherField -} -``` - -Nebo tyto dva dotazy pojmenujte: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Překrývající pole - -Výběrová sada GraphQL je považována za platnou, pouze pokud správně řeší případnou sadu výsledků. - -Pokud konkrétní výběrová sada nebo pole způsobí nejednoznačnost buď vybraného pole, nebo použitých argumentů, služba GraphQL operaci neověří. 
- -Zde je několik příkladů neplatných operací, které toto pravidlo porušují: - -**Překrývající se aliasy polí (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Řešení:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Konfliktní pole s argumenty (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Řešení:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Ve složitějších případech použití můžete toto pravidlo porušit také použitím dvou fragmentů, které by mohly způsobit konflikt v očekávané sadě: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Kromě toho mohou vést k nejasnostem například direktivy GraphQL na straně klienta jako `@skip` a `@include`: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Více informací o algoritmu najdete zde.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Nepoužívané proměnné nebo fragmenty - -Operace GraphQL je také považována za platnou, pouze pokud jsou použity všechny součásti definované operací (proměnné, fragmenty). - -Zde je několik příkladů operací GraphQL, které tato pravidla porušují: - -**Nepoužitá proměnná** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. 
-query something($someVar: String) { - someData -} -``` - -_Řešení:_ - -```graphql -query something { - someData -} -``` - -**Nepoužitý fragment**(#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Řešení:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Neplatná nebo chybějící výběrová sada (#ScalarLeafsRule) - -Výběr pole GraphQL je také platný pouze v případě, že je potvrzeno následující: - -- Pole objektu musí mít zadanou výběrovou sadu. -- Okrajové pole (skalár, enum) nesmí mít zadanou výběrovou sadu. - -Zde je několik příkladů porušení těchto pravidel s následujícím schématem: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Neplatná výběrová sada** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Řešení:_ - -```graphql -query { - user { - id - } -} -``` - -**Chybějící výběrová sada** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Řešení:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Nesprávné hodnoty argumentů (#VariablesInAllowedPositionRule) - -Operace GraphQL, které předávají pevně zadané hodnoty argumentů, musí být platné na základě hodnoty definované ve schéma. - -Zde je několik příkladů neplatných operací, které porušují tato pravidla: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) 
{ - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Neznámý typ, proměnná, fragment nebo směrnice (#UnknownX) - -Pokud je použit neznámý typ, proměnná, fragment nebo direktiva, rozhraní GraphQL API vyhodí chybu. - -Tyto neznámé odkazy je třeba opravit: - -- přejmenovat, pokud se jedná o překlep -- v opačném případě odstraňte - -### Fragment: neplatné rozšíření nebo definice - -**Neplatné rozložení fragmentů (#PossibleFragmentSpreadsRule)** - -Fragment nelze rozložit na nepoužitelný typ. - -Příklad: fragment `Kočka` nemůžeme použít na typ `Pes`: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Neplatná definice fragmentu (#FragmentsOnCompositeTypesRule)** - -Všechny fragmenty musí být definovány na (pomocí `on ...`) složeném typu, zkráceně: objektu, rozhraní nebo svazu. - -Následující příklady jsou neplatné, protože definování fragmentů na skalárech je neplatné. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Použití směrnic - -**Direktiv nelze na tomto místě použít (#KnownDirectivesRule)** - -Lze použít pouze direktivy GraphQL (`@...`) podporované Graf API. - -Zde je příklad s direktivami podporovanými GraphQL: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Poznámka: `@stream`, `@live`, `@defer` nejsou podporovány._ - -**Direktiv lze v tomto umístění použít pouze jednou (#UniqueDirectivesPerLocationRule)** - -Směrnice podporované nástrojem Grafu lze v jednom umístění použít pouze jednou. 
- -Následující text je neplatný (a nadbytečný): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/ko/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/cs/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/ko/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/cs/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/cs/subgraphs/_meta-titles.json b/website/src/pages/cs/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/cs/subgraphs/_meta-titles.json +++ b/website/src/pages/cs/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/cs/subgraphs/_meta.js b/website/src/pages/cs/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/cs/subgraphs/_meta.js +++ b/website/src/pages/cs/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/cs/subgraphs/best-practices/_meta.js b/website/src/pages/cs/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/cs/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/cs/subgraphs/best-practices/avoid-eth-calls.mdx similarity index 89% rename from 
website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx rename to website/src/pages/cs/subgraphs/best-practices/avoid-eth-calls.mdx index a0613bf2b69f..4b24fafac947 100644 --- a/website/src/pages/fr/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' --- ## TLDR @@ -103,14 +104,14 @@ You can significantly improve indexing performance by minimizing or eliminating ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/cs/subgraphs/best-practices/derivedfrom.mdx similarity index 82% rename from website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx rename to website/src/pages/cs/subgraphs/best-practices/derivedfrom.mdx index 22845a8d7dd2..344c906ffe55 100644 --- a/website/src/pages/es/subgraphs/cookbook/derivedfrom.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/derivedfrom.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' --- ## TLDR @@ -62,7 +63,6 @@ Just by adding the `@derivedFrom` directive, this schema will only store the “ This will not only make our subgraph more efficient, but it will also unlock three features: 1. We can query the `Post` and see all of its comments. - 2. We can do a reverse lookup and query any `Comment` and see which post it comes from. 3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. @@ -75,14 +75,14 @@ For a more detailed explanation of strategies to avoid large arrays, check out K ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/cs/subgraphs/best-practices/grafting-hotfix.mdx similarity index 92% rename from website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx rename to website/src/pages/cs/subgraphs/best-practices/grafting-hotfix.mdx index a0bd3f4ab1c2..ae41a5ce20ba 100644 --- a/website/src/pages/nl/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' --- ## TLDR @@ -173,14 +174,14 @@ By incorporating grafting into your subgraph development workflow, you can enhan ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/cs/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx similarity index 87% rename from website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx rename to website/src/pages/cs/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..067f26ffacf7 100644 --- a/website/src/pages/fr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
+Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood @@ -177,14 +178,14 @@ Read more about using Immutable Entities and Bytes as IDs in this blog post by D ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/pruning.mdx b/website/src/pages/cs/subgraphs/best-practices/pruning.mdx similarity index 78% rename from website/src/pages/de/subgraphs/cookbook/pruning.mdx rename to website/src/pages/cs/subgraphs/best-practices/pruning.mdx index c6b1217db9a5..b620e504ab86 100644 --- a/website/src/pages/de/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' --- ## TLDR @@ -42,14 +43,14 @@ Pruning using `indexerHints` is a best practice for subgraph development, offeri ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/timeseries.mdx b/website/src/pages/cs/subgraphs/best-practices/timeseries.mdx similarity index 90% rename from website/src/pages/de/subgraphs/cookbook/timeseries.mdx rename to website/src/pages/cs/subgraphs/best-practices/timeseries.mdx index 0168be53d7ed..2c721a9cef23 100644 --- a/website/src/pages/de/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/cs/subgraphs/best-practices/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' --- ## TLDR @@ -181,14 +182,14 @@ By adopting this pattern, developers can build more efficient and scalable subgr ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/_meta.js b/website/src/pages/cs/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/cs/subgraphs/cookbook/_meta.js +++ b/website/src/pages/cs/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index da01b6cad361..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Doporučený postup pro podgraf 4 - Zlepšení rychlosti indexování vyhnutím se eth_calls ---- - -## TLDR - -`eth_calls` jsou volání, která lze provést z podgrafu do uzlu Ethereum. Tato volání zabírají značnou dobu, než vrátí data, což zpomaluje indexování. Pokud je to možné, navrhněte chytré kontrakty tak, aby emitovaly všechna potřebná data, takže nebudete muset používat `eth_calls`. - -## Proč je dobré se vyhnout `eth_calls` - -Podgraf jsou optimalizovány pro indexování dat událostí emitovaných z chytré smlouvy. Podgraf může také indexovat data pocházející z `eth_call`, což však může indexování podgrafu výrazně zpomalit, protože `eth_calls` vyžadují externí volání chytrých smluv. Odezva těchto volání nezávisí na podgrafu, ale na konektivitě a odezvě dotazovaného uzlu Ethereum. Minimalizací nebo eliminací eth_calls v našich podgrafech můžeme výrazně zvýšit rychlost indexování. - -### Jak vypadá eth_call? - -`eth_calls` jsou často nutné, pokud data potřebná pro podgraf nejsou dostupná prostřednictvím emitovaných událostí. 
Uvažujme například scénář, kdy podgraf potřebuje zjistit, zda jsou tokeny ERC20 součástí určitého poolu, ale smlouva emituje pouze základní událost `Transfer` a neemituje událost, která by obsahovala data, která potřebujeme: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Předpokládejme, že příslušnost tokenů k poolu je určena stavovou proměnnou s názvem `getPoolInfo`. V takovém případě bychom k dotazu na tato data potřebovali použít příkaz `eth_call`: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -To je funkční, ale není to ideální, protože to zpomaluje indexování našeho podgrafu. - -## Jak odstranit `eth_calls` - -V ideálním případě by měl být inteligentní kontrakt aktualizován tak, aby v rámci událostí vysílal všechna potřebná data. 
Například úprava inteligentního kontraktu tak, aby v události obsahoval informace o bazénu, by mohla odstranit potřebu `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -Díky této aktualizaci může podgraf přímo indexovat požadovaná data bez externích volání: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -To je mnohem výkonnější, protože to odstranilo potřebu `eth_calls`. - -## Jak optimalizovat `eth_calls` - -Pokud úprava inteligentního kontraktu není možná a `eth_calls` jsou nutné, přečtěte si článek "[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)" od Simon Emanuel Schmid, kde se dozvíte různé strategie, jak optimalizovat `eth_calls`. - -## Snížení běhové režie `eth_calls` - -U `eth_calls`, které nelze odstranit, lze jejich režii za běhu minimalizovat jejich deklarací v manifestu. Když `graph-node` zpracovává blok, provede všechny deklarované `eth_calls` paralelně před spuštěním obslužných programů. Volání, která nejsou deklarována, se při běhu obslužných programů provádějí postupně. Zlepšení běhu je způsobeno tím, že se volání provádějí paralelně, nikoliv sekvenčně - to pomáhá zkrátit celkový čas strávený voláním, ale zcela ho neeliminuje. - -V současné době lze `eth_calls` deklarovat pouze pro obsluhy událostí. 
V manifestu napište - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -Žlutě zvýrazněná část je deklarace volání. Část před dvojtečkou je pouze textový popisek, který se používá pouze pro chybová hlášení. Část za dvojtečkou má tvar `Contract[address].function(params)`. Přípustné hodnoty pro adresu a params jsou `event.address` a `event.params.`. - -Samotná obslužná rutina přistupuje k výsledku tohoto `eth_call` přesně tak, jak je uvedeno v předchozí části, a to navázáním na smlouvu a provedením volání. graph-node cachuje výsledky deklarovaných `eth_call` v paměti a volání obslužné rutiny získá výsledek z této paměťové cache místo skutečného volání RPC. - -Poznámka: Deklarované eth_calls lze provádět pouze v podgraf s verzí specVersion >= 1.2.0. - -## Závěr - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 1e78f786b696..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Podgraf Doporučený postup 2 - Zlepšení indexování a rychlosti dotazů pomocí @derivedFrom ---- - -## TLDR - -Pole ve vašem schématu mohou skutečně zpomalit výkon podgrafu, pokud jejich počet přesáhne tisíce položek. Pokud je to možné, měla by se při použití polí používat direktiva `@derivedFrom`, která zabraňuje vzniku velkých polí, zjednodušuje obslužné programy a snižuje velikost jednotlivých entit, čímž výrazně zvyšuje rychlost indexování a výkon dotazů. - -## Jak používat směrnici `@derivedFrom` - -Stačí ve schématu za pole přidat směrnici `@derivedFrom`. Takto: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` vytváří efektivní vztahy typu one-to-many, které umožňují dynamické přiřazení entity k více souvisejícím entitám na základě pole v související entitě. Tento přístup odstraňuje nutnost ukládat duplicitní data na obou stranách vztahu, čímž se podgraf stává efektivnějším. - -### Příklad případu použití pro `@derivedFrom` - -Příkladem dynamicky rostoucího pole je blogovací platforma, kde "příspěvek“ může mít mnoho "komentářů“. - -Začněme s našimi dvěma entitami, `příspěvek` a `Komentář` - -Bez optimalizace byste to mohli implementovat takto pomocí pole: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Taková pole budou efektivně ukládat další data komentářů na straně Post vztahu. - -Zde vidíte, jak vypadá optimalizovaná verze s použitím `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! 
- content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Pouhým přidáním direktivy `@derivedFrom` bude toto schéma ukládat "Komentáře“ pouze na straně "Komentáře“ vztahu a nikoli na straně "Příspěvek“ vztahu. Pole se ukládají napříč jednotlivými řádky, což umožňuje jejich výrazné rozšíření. To může vést k obzvláště velkým velikostem, pokud je jejich růst neomezený. - -Tím se nejen zefektivní náš podgraf, ale také se odemknou tři funkce: - -1. Můžeme se zeptat na `Post` a zobrazit všechny jeho komentáře. - -2. Můžeme provést zpětné vyhledávání a dotazovat se na jakýkoli `Komentář` a zjistit, ze kterého příspěvku pochází. - -3. Pomocí [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) můžeme odemknout možnost přímého přístupu a manipulace s daty z virtuálních vztahů v našich mapováních podgrafů. - -## Závěr - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 934f1dd419c3..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Přehled - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Závěr - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Další zdroje - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index f63f6ba6fb03..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Osvědčený postup 3 - Zlepšení indexování a výkonu dotazů pomocí neměnných entit a bytů jako ID ---- - -## TLDR - -Použití neměnných entit a bytů pro ID v našem souboru `schema.graphql` [výrazně zlepšuje ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) rychlost indexování a výkonnost dotazů. - -## Nezměnitelné entity - -Aby byla entita neměnná, jednoduše k ní přidáme `(immutable: true)`. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -Tím, že je entita `Transfer` neměnná, je grafový uzel schopen ji zpracovávat efektivněji, což zvyšuje rychlost indexování a odezvu dotazů. - -Struktury neměnných entit se v budoucnu nezmění. Ideální entitou, která by se měla stát nezměnitelnou entitou, by byla entita, která přímo zaznamenává data událostí v řetězci, například událost `Převod` by byla zaznamenána jako entita `Převod`. - -### Pod kapotou - -Mutabilní entity mají "rozsah bloku", který udává jejich platnost. Aktualizace těchto entit vyžaduje, aby uzel grafu upravil rozsah bloků předchozích verzí, což zvyšuje zatížení databáze. Dotazy je také třeba filtrovat, aby byly nalezeny pouze živé entity. Neměnné entity jsou rychlejší, protože jsou všechny živé, a protože se nebudou měnit, nejsou při zápisu nutné žádné kontroly ani aktualizace a při dotazech není nutné žádné filtrování. - -### Kdy nepoužívat nezměnitelné entity - -Pokud máte pole, jako je `status`, které je třeba v průběhu času měnit, neměli byste entitu učinit neměnnou. 
Jinak byste měli používat neměnné entity, kdykoli je to možné. - -## Bajty jako IDs - -Každá entita vyžaduje ID. V předchozím příkladu vidíme, že ID je již typu Bytes. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -I když jsou možné i jiné typy ID, například String a Int8, doporučuje se pro všechna ID používat typ Bytes, protože pro uložení binárních dat zabírají znakové řetězce dvakrát více místa než řetězce Byte a při porovnávání znakových řetězců UTF-8 se musí brát v úvahu locale, což je mnohem dražší než bytewise porovnávání používané pro porovnávání řetězců Byte. - -### Důvody, proč nepoužívat bajty jako IDs - -1. Pokud musí být IDs entit čitelné pro člověka, například automaticky doplňované číselné IDs nebo čitelné řetězce, neměly by být použity bajty pro IDs. -2. Při integraci dat podgrafu s jiným datovým modelem, který nepoužívá bajty jako IDs, by se bajty jako IDs neměly používat. -3. Zlepšení výkonu indexování a dotazování není žádoucí. - -### Konkatenace s byty jako IDs - -V mnoha podgrafech se běžně používá spojování řetězců ke spojení dvou vlastností události do jediného ID, například pomocí `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. Protože se však tímto způsobem vrací řetězec, značně to zhoršuje indexování podgrafů a výkonnost dotazování. - -Místo toho bychom měli použít metodu `concatI32()` pro spojování vlastností událostí. Výsledkem této strategie je ID `Bytes`, které je mnohem výkonnější. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Třídění s bajty jako ID - -Třídění pomocí bajtů jako IDs není optimální, jak je vidět v tomto příkladu dotazu a odpovědi. - -Dotaz: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Odpověď na dotaz: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -ID jsou vrácena v hex. - -Abychom zlepšili třídění, měli bychom v entitě vytvořit další pole, které bude BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -To umožní postupnou optimalizaci třídění. - -Dotaz: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Odpověď na dotaz: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Závěr - -Bylo prokázáno, že použití neměnných entit i bytů jako ID výrazně zvyšuje efektivitu podgrafů. Testy konkrétně ukázaly až 28% nárůst výkonu dotazů a až 48% zrychlení indexace. 
- -Více informací o používání nezměnitelných entit a bytů jako ID najdete v tomto příspěvku na blogu Davida Lutterkorta, softwarového inženýra ve společnosti Edge & Node: [Dvě jednoduchá vylepšení výkonu podgrafu](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/pruning.mdx b/website/src/pages/cs/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c818c06f37d5..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Doporučený postup 1 - Zlepšení rychlosti dotazu pomocí ořezávání podgrafů ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) odstraní archivní entity z databáze podgrafu až do daného bloku a odstranění nepoužívaných entit z databáze podgrafu zlepší výkonnost dotazu podgrafu, často výrazně. Použití `indexerHints` je snadný způsob, jak podgraf ořezat. - -## Jak prořezat podgraf pomocí `indexerHints` - -Přidejte do manifestu sekci `indexerHints`. - -`indexerHints` má tři možnosti `prune`: - -- `prune: auto`: Udržuje minimální potřebnou historii nastavenou indexátorem, čímž optimalizuje výkon dotazu. Toto je obecně doporučené nastavení a je výchozí pro všechny podgrafy vytvořené pomocí `graph-cli` >= 0.66.0. 
-- `prune: `: Nastaví vlastní omezení počtu historických bloků, které se mají zachovat. -- `prune: never`: Je výchozí, pokud není k dispozici sekce `indexerHints`. `prune: never` by mělo být vybráno, pokud jsou požadovány [Dotazy na cestování časem](/subgraphs/querying/graphql-api/#time-travel-queries). - -Aktualizací souboru `subgraph.yaml` můžeme do podgrafů přidat `indexerHints`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Důležité úvahy - -- Pokud jsou kromě ořezávání požadovány i [dotazy na cestování v čase](/subgraphs/querying/graphql-api/#time-travel-queries), musí být ořezávání provedeno přesně, aby byla zachována funkčnost dotazů na cestování v čase. Z tohoto důvodu se obecně nedoporučuje používat `indexerHints: prune: auto` s Time Travel Queries. Místo toho proveďte ořezávání pomocí `indexerHints: prune: ` pro přesné ořezání na výšku bloku, která zachovává historická data požadovaná dotazy Time Travel, nebo použijte `prune: never` pro zachování všech dat. - -- Není možné [roubovat](/subgraphs/cookbook/grafting/) na výšku bloku, který byl prořezán. Pokud se roubování provádí běžně a je požadováno prořezání, doporučuje se použít `indexerHints: prune: ` který přesně zachová stanovený počet bloků (např. dostatečný počet na šest měsíců). - -## Závěr - -Ořezávání pomocí `indexerHints` je osvědčeným postupem pro vývoj podgrafů, který nabízí významné zlepšení výkonu dotazů. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx b/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 7178891769da..000000000000 --- a/website/src/pages/cs/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Přehled - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Důležité úvahy - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Příklad: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Příklad: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Příklad: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Příklad: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Závěr - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/cs/subgraphs/developing/deploying/_meta.js b/website/src/pages/cs/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/cs/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/cs/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 329cc1056022..000000000000 --- a/website/src/pages/cs/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: FAQs Podgraf Studio ---- - -## 1. Co je Podgraf Studio? - -[Podgraf Studio](https://thegraph.com/studio/) je aplikace pro vytváření, správu a publikování podgrafů a klíčů API. - -## 2. Jak vytvořím klíč API? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. Mohu vytvořit více klíčů API? - -Ano! Můžete si vytvořit více klíčů API a používat je v různých projektech. Podívejte se na odkaz [zde](https://thegraph.com/studio/apikeys/). - -## 4. Jak omezím doménu pro klíč API? 
- -Po vytvoření klíče API můžete v části Zabezpečení definovat domény, které se mohou dotazovat na konkrétní klíč API. - -## 5. Mohu svůj podgraf převést na jiného vlastníka? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -Všimněte si, že po přenesení podgrafu jej již nebudete moci ve Studio zobrazit ani upravovat. - -## 6. Jak najdu adresy URL dotazů pro podgrafy, pokud nejsem Vývojář podgrafu, který chci použít? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -Nezapomeňte, že si můžete vytvořit klíč API a dotazovat se na libovolný podgraf zveřejněný v síti, i když si podgraf vytvoříte sami. Tyto dotazy prostřednictvím nového klíče API jsou placené dotazy jako jakékoli jiné v síti. 
diff --git a/website/src/pages/cs/subgraphs/developing/publishing/_meta.js b/website/src/pages/cs/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/cs/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/cs/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/cs/subgraphs/querying/_meta.js b/website/src/pages/cs/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/cs/subgraphs/querying/_meta.js +++ b/website/src/pages/cs/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/de/resources/_meta-titles.json b/website/src/pages/de/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/de/resources/_meta-titles.json +++ b/website/src/pages/de/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/de/resources/_meta.js b/website/src/pages/de/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/de/resources/_meta.js +++ b/website/src/pages/de/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/de/resources/release-notes/_meta.js 
b/website/src/pages/de/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/de/resources/release-notes/_meta.js rename to website/src/pages/de/resources/migration-guides/_meta.js diff --git a/website/src/pages/nl/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/de/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 100% rename from website/src/pages/nl/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/de/resources/migration-guides/assemblyscript-migration-guide.mdx diff --git a/website/src/pages/en/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/de/resources/migration-guides/graphql-validations-migration-guide.mdx similarity index 99% rename from website/src/pages/en/resources/release-notes/graphql-validations-migration-guide.mdx rename to website/src/pages/de/resources/migration-guides/graphql-validations-migration-guide.mdx index 4d909e8970a8..29fed533ef8c 100644 --- a/website/src/pages/en/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/de/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -1,5 +1,5 @@ --- -title: GraphQL Validations migration guide +title: GraphQL Validations Migration Guide --- Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). 
diff --git a/website/src/pages/de/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/de/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 31788f43b53c..000000000000 --- a/website/src/pages/de/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). - -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## Migration guide - -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. - -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. 
- -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. - -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). - -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. 
- -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -oder - -- `https://api-next.thegraph.com/subgraphs/name//` - -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. - -## How to solve issues - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. - -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Lösung:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. 
-query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Lösung:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Lösung:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Lösung:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Lösung:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
- -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Lösung:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Lösung:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. 
-query something($someVar: String) { - someData -} -``` - -_Lösung:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Lösung:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. -- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Lösung:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Lösung:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) 
{ - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### Fragment: invalid spread or definition - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. - -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. 
- -Folgendes ist ungültig (und überflüssig): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/nl/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/de/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/nl/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/de/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/de/subgraphs/_meta-titles.json b/website/src/pages/de/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/de/subgraphs/_meta-titles.json +++ b/website/src/pages/de/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/de/subgraphs/_meta.js b/website/src/pages/de/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/de/subgraphs/_meta.js +++ b/website/src/pages/de/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/de/subgraphs/best-practices/_meta.js b/website/src/pages/de/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/de/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/de/subgraphs/best-practices/avoid-eth-calls.mdx similarity index 89% rename from 
website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx rename to website/src/pages/de/subgraphs/best-practices/avoid-eth-calls.mdx index a0613bf2b69f..4b24fafac947 100644 --- a/website/src/pages/es/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/de/subgraphs/best-practices/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' --- ## TLDR @@ -103,14 +104,14 @@ You can significantly improve indexing performance by minimizing or eliminating ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/de/subgraphs/best-practices/derivedfrom.mdx similarity index 82% rename from website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx rename to website/src/pages/de/subgraphs/best-practices/derivedfrom.mdx index 22845a8d7dd2..344c906ffe55 100644 --- a/website/src/pages/de/subgraphs/cookbook/derivedfrom.mdx +++ b/website/src/pages/de/subgraphs/best-practices/derivedfrom.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' --- ## TLDR @@ -62,7 +63,6 @@ Just by adding the `@derivedFrom` directive, this schema will only store the “ This will not only make our subgraph more efficient, but it will also unlock three features: 1. We can query the `Post` and see all of its comments. - 2. We can do a reverse lookup and query any `Comment` and see which post it comes from. 3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. @@ -75,14 +75,14 @@ For a more detailed explanation of strategies to avoid large arrays, check out K ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/de/subgraphs/best-practices/grafting-hotfix.mdx similarity index 92% rename from website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx rename to website/src/pages/de/subgraphs/best-practices/grafting-hotfix.mdx index a0bd3f4ab1c2..ae41a5ce20ba 100644 --- a/website/src/pages/ko/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/de/subgraphs/best-practices/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' --- ## TLDR @@ -173,14 +174,14 @@ By incorporating grafting into your subgraph development workflow, you can enhan ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/de/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx similarity index 87% rename from website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx rename to website/src/pages/de/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..067f26ffacf7 100644 --- a/website/src/pages/ar/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/de/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
+Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood @@ -177,14 +178,14 @@ Read more about using Immutable Entities and Bytes as IDs in this blog post by D ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ar/subgraphs/cookbook/pruning.mdx b/website/src/pages/de/subgraphs/best-practices/pruning.mdx similarity index 78% rename from website/src/pages/ar/subgraphs/cookbook/pruning.mdx rename to website/src/pages/de/subgraphs/best-practices/pruning.mdx index c6b1217db9a5..b620e504ab86 100644 --- a/website/src/pages/ar/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/de/subgraphs/best-practices/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' --- ## TLDR @@ -42,14 +43,14 @@ Pruning using `indexerHints` is a best practice for subgraph development, offeri ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx b/website/src/pages/de/subgraphs/best-practices/timeseries.mdx similarity index 90% rename from website/src/pages/nl/subgraphs/cookbook/timeseries.mdx rename to website/src/pages/de/subgraphs/best-practices/timeseries.mdx index 0168be53d7ed..2c721a9cef23 100644 --- a/website/src/pages/nl/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/de/subgraphs/best-practices/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' --- ## TLDR @@ -181,14 +182,14 @@ By adopting this pattern, developers can build more efficient and scalable subgr ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/_meta.js b/website/src/pages/de/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/de/subgraphs/cookbook/_meta.js +++ b/website/src/pages/de/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/de/subgraphs/developing/deploying/_meta.js b/website/src/pages/de/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/de/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/de/subgraphs/developing/publishing/_meta.js b/website/src/pages/de/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/de/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/de/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/de/subgraphs/querying/_meta.js b/website/src/pages/de/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/de/subgraphs/querying/_meta.js +++ b/website/src/pages/de/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { 
introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/en/resources/_meta-titles.json b/website/src/pages/en/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/en/resources/_meta-titles.json +++ b/website/src/pages/en/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/en/resources/_meta.js b/website/src/pages/en/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/en/resources/_meta.js +++ b/website/src/pages/en/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/en/resources/release-notes/_meta.js b/website/src/pages/en/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/en/resources/release-notes/_meta.js rename to website/src/pages/en/resources/migration-guides/_meta.js diff --git a/website/src/pages/pl/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/en/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 100% rename from website/src/pages/pl/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/en/resources/migration-guides/assemblyscript-migration-guide.mdx diff --git a/website/src/pages/ko/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/en/resources/migration-guides/graphql-validations-migration-guide.mdx similarity index 99% rename from 
website/src/pages/ko/resources/release-notes/graphql-validations-migration-guide.mdx rename to website/src/pages/en/resources/migration-guides/graphql-validations-migration-guide.mdx index 4d909e8970a8..29fed533ef8c 100644 --- a/website/src/pages/ko/resources/release-notes/graphql-validations-migration-guide.mdx +++ b/website/src/pages/en/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -1,5 +1,5 @@ --- -title: GraphQL Validations migration guide +title: GraphQL Validations Migration Guide --- Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). diff --git a/website/src/pages/pl/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/en/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/pl/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/en/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/en/subgraphs/_meta-titles.json b/website/src/pages/en/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/en/subgraphs/_meta-titles.json +++ b/website/src/pages/en/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/en/subgraphs/_meta.js b/website/src/pages/en/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/en/subgraphs/_meta.js +++ b/website/src/pages/en/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/en/subgraphs/best-practices/_meta.js b/website/src/pages/en/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ 
b/website/src/pages/en/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/en/subgraphs/best-practices/avoid-eth-calls.mdx similarity index 89% rename from website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx rename to website/src/pages/en/subgraphs/best-practices/avoid-eth-calls.mdx index a0613bf2b69f..4b24fafac947 100644 --- a/website/src/pages/ar/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/en/subgraphs/best-practices/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' --- ## TLDR @@ -103,14 +104,14 @@ You can significantly improve indexing performance by minimizing or eliminating ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/en/subgraphs/best-practices/derivedfrom.mdx similarity index 82% rename from website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx rename to website/src/pages/en/subgraphs/best-practices/derivedfrom.mdx index 22845a8d7dd2..344c906ffe55 100644 --- a/website/src/pages/fr/subgraphs/cookbook/derivedfrom.mdx +++ b/website/src/pages/en/subgraphs/best-practices/derivedfrom.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' --- ## TLDR @@ -62,7 +63,6 @@ Just by adding the `@derivedFrom` directive, this schema will only store the “ This will not only make our subgraph more efficient, but it will also unlock three features: 1. We can query the `Post` and see all of its comments. - 2. We can do a reverse lookup and query any `Comment` and see which post it comes from. 3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. @@ -75,14 +75,14 @@ For a more detailed explanation of strategies to avoid large arrays, check out K ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. 
[Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/en/subgraphs/best-practices/grafting-hotfix.mdx similarity index 92% rename from website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx rename to website/src/pages/en/subgraphs/best-practices/grafting-hotfix.mdx index a0bd3f4ab1c2..ae41a5ce20ba 100644 --- a/website/src/pages/de/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/en/subgraphs/best-practices/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' --- ## TLDR @@ -173,14 +174,14 @@ By incorporating grafting into your subgraph development workflow, you can enhan ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. 
[Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/en/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx similarity index 87% rename from website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx rename to website/src/pages/en/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..067f26ffacf7 100644 --- a/website/src/pages/de/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/en/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. 
-Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood @@ -177,14 +178,14 @@ Read more about using Immutable Entities and Bytes as IDs in this blog post by D ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/cookbook/pruning.mdx b/website/src/pages/en/subgraphs/best-practices/pruning.mdx similarity index 78% rename from website/src/pages/fr/subgraphs/cookbook/pruning.mdx rename to website/src/pages/en/subgraphs/best-practices/pruning.mdx index c6b1217db9a5..b620e504ab86 100644 --- a/website/src/pages/fr/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/en/subgraphs/best-practices/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' --- ## TLDR @@ -42,14 +43,14 @@ Pruning using `indexerHints` is a best practice for subgraph development, offeri ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx b/website/src/pages/en/subgraphs/best-practices/timeseries.mdx similarity index 90% rename from website/src/pages/pl/subgraphs/cookbook/timeseries.mdx rename to website/src/pages/en/subgraphs/best-practices/timeseries.mdx index 0168be53d7ed..2c721a9cef23 100644 --- a/website/src/pages/pl/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/en/subgraphs/best-practices/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' --- ## TLDR @@ -181,14 +182,14 @@ By adopting this pattern, developers can build more efficient and scalable subgr ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/en/subgraphs/cookbook/_meta.js b/website/src/pages/en/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/en/subgraphs/cookbook/_meta.js +++ b/website/src/pages/en/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/en/subgraphs/developing/deploying/_meta.js b/website/src/pages/en/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/en/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/en/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/en/subgraphs/developing/publishing/_meta.js b/website/src/pages/en/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/en/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/en/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/en/subgraphs/querying/_meta.js b/website/src/pages/en/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/en/subgraphs/querying/_meta.js +++ b/website/src/pages/en/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { 
introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/es/resources/_meta-titles.json b/website/src/pages/es/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/es/resources/_meta-titles.json +++ b/website/src/pages/es/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/es/resources/_meta.js b/website/src/pages/es/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/es/resources/_meta.js +++ b/website/src/pages/es/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/es/resources/release-notes/_meta.js b/website/src/pages/es/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/es/resources/release-notes/_meta.js rename to website/src/pages/es/resources/migration-guides/_meta.js diff --git a/website/src/pages/ro/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/es/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 100% rename from website/src/pages/ro/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/es/resources/migration-guides/assemblyscript-migration-guide.mdx diff --git a/website/src/pages/es/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/es/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 
000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/es/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. 
+ +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. 
+ +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. + +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) 
{ + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) { + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index bfc973f982dd..000000000000 --- a/website/src/pages/es/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Guía de Migración de AssemblyScript ---- - -Hasta ahora, los subgrafos han utilizado una de las [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Finalmente, hemos añadido soporte para la [el más nuevo disponible](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉! 🎉 - -Esto permitirá a los desarrolladores de subgrafos utilizar las nuevas características del lenguaje AS y la librería estándar. - -Esta guía es aplicable para cualquiera que use `graph-cli`/`graph-ts` bajo la versión `0.22.0`. Si ya estás en una versión superior (o igual) a esa, has estado usando la versión `0.19.10` de AssemblyScript 🙂 - -> Nota: A partir de `0.24.0`, `graph-node` puede soportar ambas versiones, dependiendo del `apiVersion` especificado en el manifiesto del subgrafo. - -## Características - -### Nueva Funcionalidad - -- `TypedArray`s ahora puede construirse desde `ArrayBuffer`s usando el [nuevo `wrap` método estático](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nuevas funciones de la biblioteca estándar: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Se agregó soporte para x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Se agregó `StaticArray`, una más eficiente variante de array ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Se agregó `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Se implementó el argumento `radix` en `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Se agregó soporte para los separadores en los literales de punto flotante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Se agregó soporte para las funciones de primera clase ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Se 
agregaron builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Se implementó `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Se agregó soporte para las plantillas de strings literales ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Se agregó `encodeURI(Component)` y `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Se agregó `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Se agregó `toUTCString` para `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Se agregó `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Optimizaciones - -- Funciones `Math` como `exp`, `exp2`, `log`, `log2` y `pow` fueron reemplazadas por variantes más rápidas ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Optimizar ligeramente `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Caché de más accesos a campos en std Map y Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimizar para potencias de dos en `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Otros - -- El tipo de un de array literal ahora puede inferirse a partir de su contenido ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Actualizado stdlib a Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## ¿Cómo actualizar? - -1. Cambiar tus mappings `apiVersion` en `subgraph.yaml` a `0.0.6`: - -```yaml -... -dataSources: - ... 
- mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. Actualiza la `graph-cli` que usas a la `última` versión: - -```bash -# si lo tiene instalada de forma global -npm install --global @graphprotocol/graph-cli@latest - -# o desde su subgrafo si lo tiene como dependencia -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Haz lo mismo con `graph-ts`, pero en lugar de instalarlo globalmente, guárdalo en tus dependencias principales: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Sigue el resto de la guía para arreglar los cambios que rompen el lenguaje. -5. Ejecuta `codegen` y `deploy` nuevamente. - -## Rompiendo los esquemas - -### Anulabilidad - -En la versión anterior de AssemblyScript, podías crear un código como el siguiente: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Sin embargo, en la versión más reciente, debido a que el valor es anulable, es necesario que lo compruebes, así: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -O forzarlo así: - -```typescript -let maybeValue = load()! // rompiendo el runtime si el valor es nulo - -maybeValue.aMethod() -``` - -Si no estás seguro de cuál elegir, te recomendamos que utilices siempre la versión segura. Si el valor no existe, es posible que quieras hacer una declaración if temprana con un retorno en tu handler de subgrafo. 
- -### Variable Shadowing - -Antes podías hacer [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) y un código como este funcionaría: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Sin embargo, ahora esto ya no es posible, y el compilador devuelve este error: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Tendrás que cambiar el nombre de las variables duplicadas si tienes una variable shadowing. - -### Comparaciones Nulas - -Al hacer la actualización en un subgrafo, a veces pueden aparecer errores como estos: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Para solucionarlo puedes simplemente cambiar la declaración `if` por algo así: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Lo mismo ocurre si haces != en lugar de ==. 
- -### Casting - -La forma común de hacer el casting antes era simplemente usar la palabra clave `as`, de la siguiente forma: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Sin embargo, esto solo funciona en dos casos: - -- Casting de primitivas (entre tipos como `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting en la herencia de clases (subclase → superclase) - -Ejemplos: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -Hay dos escenarios en los que puede querer cast, pero usando `as`/`var` **no es seguro**: - -- Downcasting en la herencia de clases (superclase → subclase) -- Entre dos tipos que comparten una superclase - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -Para esos casos, puedes usar la función `changetype`: - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// entre dos tipos que comparten un superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -Si solo quieres eliminar la anulabilidad, puedes seguir usando el `as` operador (o `variable`), pero asegúrate de que el valor no puede ser nulo, de lo contrario se romperá. 
- -```typescript -// eliminar anulabilidad -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // sabe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Para el caso de la anulabilidad se recomienda echar un vistazo al [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), hará que tu código sea más limpio 🙂 - -También hemos añadido algunos métodos estáticos en algunos tipos para facilitar el casting, son: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Comprobación de anulabilidad con acceso a la propiedad - -Para usar el [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) puedes usar la declaración `if` o el operador ternario (`?` and `:`) asi: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// o - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Sin embargo eso solo funciona cuando estás haciendo el `if` / ternario en una variable, no en un acceso a una propiedad, como este: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -Lo que produce este error: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Para solucionar este problema, puedes crear una variable para ese acceso a la propiedad de manera que el compilador pueda hacer la magia de la comprobación de nulidad: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### Sobrecarga de operadores con acceso a propiedades - -Si intentas sumar (por ejemplo) un tipo anulable (desde un acceso a una propiedad) con otro no anulable, el compilador de AssemblyScript en lugar de dar un error en el tiempo de compilación advirtiendo que uno de los valores es anulable, simplemente compila en silencio, dando oportunidad a que el código se rompa en tiempo de ejecución. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -Hemos abierto un tema en el compilador de AssemblyScript para esto, pero por ahora si haces este tipo de operaciones en tus mapeos de subgrafos, deberías cambiarlos para hacer una comprobación de nulos antes de ello. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### Inicialización del valor - -Si tienes algún código como este: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Compilará pero se romperá en tiempo de ejecución, eso ocurre porque el valor no ha sido inicializado, así que asegúrate de que tu subgrafo ha inicializado sus valores, así: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -También si tienes propiedades anulables en una entidad GraphQL, como esta: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -Y tienes un código similar a este: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Tendrás que asegurarte de inicializar el valor `total.amount`, porque si intentas acceder como en la última línea para la suma, se bloqueará. Así que o bien la inicializas primero: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -O simplemente puedes cambiar tu esquema GraphQL para no usar un tipo anulable para esta propiedad, entonces la inicializaremos como cero en el paso `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // justo luego de haber iniciado la propiedad de no-anulable -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Inicialización de las propiedades de la clase - -Si exportas alguna clase con propiedades que son otras clases (declaradas por ti o por la librería estándar) así: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -El compilador dará un error porque tienes que añadir un inicializador para las propiedades que son clases, o añadir el operador `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// o - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Inicialización de Array - -La clase `Array` sigue aceptando un número para inicializar la longitud de la lista, sin embargo hay que tener cuidado porque operaciones como `.push` en realidad aumentarán el tamaño en lugar de añadirlo al principio, por ejemplo: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -Dependiendo de los tipos que estés utilizando, por ejemplo los anulables, y de cómo estés accediendo a ellos, podrías encontrarte con un error de ejecución como este: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Para realmente empujar al principio deberías o bien, inicializar el `Array` con tamaño cero, así: - -```typescript -let arr = 
new Array(0) // [] - -arr.push('something') // ["something"] -``` - -O debería mutar a través de un índice: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### Esquema GraphQL - -Esto no es un cambio directo de AssemblyScript, pero es posible que tengas que actualizar tu archivo `schema.graphql`. - -Ahora ya no puedes definir campos en tus tipos que sean Listas No Anulables. Si tienes un esquema como este: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Tendrás que añadir un `!` al miembro del tipo Lista, así: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -Esto cambió debido a las diferencias de anulabilidad entre las versiones de AssemblyScript, y está relacionado con el archivo `src/generated/schema.ts` (ruta por defecto, puede que lo hayas cambiado). - -### Otros - -- Alineado `Map#set` y `Set#add` con el spec, devolviendo `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Las arrays ya no heredan de ArrayBufferView, sino que son distintas ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Las clases inicializadas a partir de objetos literales ya no pueden definir un constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- El resultado de una operación binaria `**` es ahora el entero denominador común si ambos operandos son enteros. 
Anteriormente, el resultado era un flotante como si se llamara a `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerción `NaN` a `false` cuando casting a `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Al desplazar un valor entero pequeño de tipo `i8`/`u8` o `i16`/`u16`, sólo los 3 o 4 bits menos significativos del valor RHS afectan al resultado, de forma análoga al resultado de un `i32.shl` que sólo se ve afectado por los 5 bits menos significativos del valor RHS. Ejemplo: `someI8 << 8` previamente producía el valor `0`, pero ahora produce `someI8` debido a enmascarar el RHS como `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Corrección de errores en las comparaciones de strings relacionales cuando los tamaños difieren ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 55801738ddca..000000000000 --- a/website/src/pages/es/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,534 +0,0 @@ ---- -title: Guía de migración de Validaciones GraphQL ---- - -Pronto `graph-node` admitirá una cobertura del 100% de la [especificación de validaciones GraphQL](https://spec.graphql.org/June2018/#sec-Validation). - -Versiones anteriores de `graph-node` no soportaban todas las validaciones y proporcionaban respuestas más gráciles, por lo que en casos de ambigüedad, `graph-node` ignoraba componentes inválidos de operaciones GraphQL. - -El soporte de Validaciones GraphQL es el pilar para las próximas nuevas características y el rendimiento a escala de The Graph Network. 
- -También asegurará la determinismo de las respuestas de las consultas, un requisito clave en The Graph Network. - -**Habilitar las Validaciones GraphQL romperá algunas consultas existentes** enviadas a la API de The Graph. - -Para ser compatible con esas validaciones, por favor sigue la guía de migración. - -> ⚠️ Si no migras tus consultas antes de que se implementen las validaciones, estas devolverán errores y podrían romper tus interfaces de usuario/clientes. - -## Guía de migración - -Puedes utilizar la herramienta de migración CLI para encontrar cualquier problema en tus operaciones GraphQL y solucionarlo. Alternativamente, puedes actualizar el endpoint de tu cliente GraphQL para usar el endpoint `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Probar tus consultas contra este endpoint te ayudará a encontrar los problemas en tus consultas. - -> No todos los subgrafos deberán migrarse, si estás utilizando [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) o [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), ya se aseguran de que tus consultas sean válidas. - -## Herramienta de migración de la línea de comandos - -**La mayoría de los errores en las operaciones de GraphQL pueden ser encontrados en tu código previamente.** - -Por esta razón, brindamos una experiencia fluida para validar tus operaciones de GraphQL durante el desarrollo o en CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) es una herramienta CLI simple que ayuda a validar operaciones de GraphQL contra un esquema dado. - -### **Empezando** - -Puedes ejecutar la herramienta de la siguiente manera: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notas:** - -- Configura o reemplaza $GITHUB_USER, $SUBGRAPH_NAME con los valores apropiados. 
Por ejemplo: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- La URL del esquema de vista previa (https://api-next.thegraph.com/) proporcionada tiene una limitación de tasa alta y se descontinuará una vez que todos los usuarios se hayan migrado a la nueva versión. **No lo uses en producción.** -- Las operaciones se identifican en archivos con las siguientes extensiones [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (opción `-o`). - -### Salida del CLI - -La herramienta de línea de comandos `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` mostrará cualquier error en las operaciones GraphQL de la siguiente manera: - -![Error de salida del CLI](https://i.imgur.com/x1cBdhq.png) - -Por cada error, encontrarás una descripción, una ruta de archivo y posición, y un enlace a un ejemplo de solución (ver la siguiente sección). - -## Ejecuta tus consultas locales contra el esquema de vista previa - -Proporcionamos un punto final `https://api-next.thegraph.com/` que ejecuta una versión de `graph-node` que tiene las validaciones activadas. - -Puedes probar tus consultas enviándolas a: - -- `https://api-next.thegraph.com/subgraphs/id` - -o - -- `https://api-next.thegraph.com/subgraphs/name//` - -Para trabajar en consultas que hayan sido identificadas como teniendo errores de validación, puedes usar tu herramienta de consulta GraphQL favorita, como Altair o [GraphiQL](https://cloud.hasura.io/public/graphiql), y probar tu consulta. Esas herramientas también marcarán esos errores en su interfaz de usuario, incluso antes de ejecutarlos. - -## Como resolver problemas - -A continuación, encontrará todos los posibles errores de validación de GraphQL que podrían ocurrir en sus operaciones de GraphQL existentes. 
- -### Las variables, operaciones, fragmentos o argumentos GraphQL deben ser únicos - -Aplicamos reglas para garantizar que una operación incluye un conjunto único de variables GraphQL, operaciones, fragmentos y argumentos. - -Una operación GraphQL solo es válida si no contiene ninguna ambigüedad. - -Para lograr eso, necesitamos asegurarnos de que algunos componentes en su operación de GraphQL sean únicos. - -Aquí hay un ejemplo de algunas operaciones inválidas que violan estas reglas: - -**Nombre de consulta duplicado (#UniqueOperationNamesRule)** - -```graphql -# La siguiente operación viola la regla UniqueOperationName, -# ya que tenemos una sola operación con 2 consultas con el mismo nombre -query myData { - id -} - -query myData { - name -} -``` - -_Solución:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Nombre de Fragmento Duplicado (#UniqueFragmentNamesRule)** - -```graphql -# La siguiente operación violó la regla UniqueFragmentName -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solución:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # asignar un nombre único al fragmento metadatos -} - -fragment MyFieldsName { #asigna un nombre único al fragment name -} -``` - -**Nombre de variable duplicado (#UniqueVariableNamesRule)** - -```graphql -# La siguiente operación viola la regla UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solución:_ - -```graphql -query myData($id: String) { - # Manten la variable relevante (aquí: `$id: String`) - id - ...MyFields -} -``` - -**Nombre de argumento duplicado (#UniqueArgument)** - -```graphql -# La siguiente operacion, violó la consulta UniqueArguments -myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solución:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Consulta anonima duplicada (#LoneAnonymousOperationRule)** - -Además, el uso de dos operaciones anónimas violará la regla `LoneAnonymousOperation` debido al conflicto en la estructura de respuesta: - -```graphql -# Esto fallará si se ejecuta en conjunto en -# una sola operación con las siguientes dos consultas: -query { - someField -} - -query { - otherField -} -``` - -_Solución:_ - -```graphql -query { - someField - otherField -} -``` - -O nombra las dos consultas: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Campos superpuestos - -Un conjunto de selección de GraphQL se considera válido solo si resuelve correctamente el conjunto de resultados final. - -Si un conjunto de selección específico o un campo crea ambigüedad ya sea por el campo seleccionado o por los argumentos utilizados, el servicio GraphQL no podrá validar la operación. - -Aquí hay algunos ejemplos de operaciones inválidas que violan estas reglas: - -**Alias de campos en conflicto (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Poner alias a los campos puede causar conflictos -# ya sea con otros alias o con otros campos que existen en el esquema de GraphQL. -query { - dogs { - name: nickname - name - } -} -``` - -_Solución:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias el original `name` field - } -} -``` - -**Campos en conflicto con argumentos (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Diferentes argumentos pueden llevar a diferentes datos, -# asi que no podemos asumir que los capos serán los mismos. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solución:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Además, en casos de uso más complejos, se puede violar esta regla al utilizar dos fragmentos que puedan causar un conflicto en el conjunto de resultados esperado: - -```graphql -query { - # Eventualmente, tenemos dos "x" definiciones, apuntando - # para diferentes campos! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Además de eso, las directivas GraphQL del lado del cliente como `@skip` y `@include` podrían causar ambigüedad, por ejemplo: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Puedes leer mas sobre el algoritmo aqui.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Variables o fragmentos no utilizados - -Una operación GraphQL también se considera válida solo si se utilizan todos los componentes definidos en la operación (variables, fragmentos). - -Aquí hay algunos ejemplos de operaciones de GraphQL que violan estas reglas: - -**Variable no utilizada** (#NoUnusedVariablesRule) - -```graphql -# Inválido, porque $someVar nunca se usa. -consulta algo($someVar: String) { - someData -} -``` - -_Solución:_ - -```graphql -query something { - someData -} -``` - -**Fragmento no Utilizado** (#NoUnusedFragmentsRule) - -```graphql -#Inválido, ya que el fragmento AllFields nunca se utiliza. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solución:_ - -```graphql -# Invalido, porque el fragmento AllFields nunca se utiliza. 
-query something { - someData -} - -# elimina el fragmento `AllFields` -``` - -### Conjunto de selección inválido o faltante (#ScalarLeafsRule) - -Además, una selección de campos GraphQL solo es válida si se valida lo siguiente: - -- Un campo de objeto debe tener un conjunto de selección especificado. -- Un campo de borde (escalar, enum) no debe tener un conjunto de selección especificado. - -Aquí hay algunos ejemplos de violaciones de estas reglas con el siguiente esquema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Conjunto de Selección Inválido** - -```graphql -query { - user { - id { # Invalido, porque "id" es de tipo ID y no tiene sub-campos - - } - } -} -``` - -_Solución:_ - -```graphql -query { - user { - id - } -} -``` - -**Falta el Conjunto de Selección** - -```graphql -query { - user { - id - image # 'image' requiere un conjunto de selección para subcampos! - } -} -``` - -_Solución:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Argumentos de valores incorrectos(#VariablesInAllowedPositionRule) - -Las operaciones de GraphQL que pasan valores codificados a los argumentos deben ser válidas, basadas en el valor definido en el esquema. - -Aquí hay algunos ejemplos de operaciones inválidas que violan estas reglas: - -```graphql -query purposes { - # si "name" esta definido como "String" en el esquema, - # Esta consulta fallará durante la validación. - purpose(name: 1) { - id - } -} - -# Esto también puede suceder cuando se define una variable incorrecta: -query purposes($name: Int!) 
{ - # si "name" esta definido como `String` en el esquema, - # esta consulta fallara durante la validación, por que la - # variable usada es de tipo `Int` - purpose(name: $name) { - id - } -} -``` - -### Tipo, Variable, Fragmento o Directiva Desconocida (#UnknownX) - -La API de GraphQL generará un error si se utiliza algún tipo, variable, fragmento o directiva desconocido. - -Esas referencias desconocidas deben ser corregidas: - -- renombrar si fue un error tipográfico -- de lo contrario, elimina - -### Fragmento: expansión o definición inválida - -**Extensión de fragmento no válida (#PossibleFragmentSpreadsRule)** - -Un fragmento no puede ser aplicado en un tipo no correspondiente. - -Por ejemplo, no podemos aplicar un fragmento `Cat` al tipo `Dog`: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Definicion de fragento inválida (#FragmentsOnCompositeTypesRule)** - -Todos los fragmentos deben definirse en (usando `on ...`) un tipo compuesto, en resumen: objeto, interfaz o unión. - -Los siguientes ejemplos son inválidos, ya que definir fragmentos en escalares es inválido. - -```graphql -fragment fragOnScalar on Int { - # no podemos definir un fragmento sobre un escalar -(`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` no es un subtipo de `Dog` - somethingElse - } -} -``` - -### Uso de directivas - -**La directiva no puede ser utilizada en esta ubicación (#KnownDirectivesRule)** - -Solo las directivas GraphQL (`@...`) soportadas por la API de The Graph pueden ser utilizadas. - -Aqui hay un ejemplo con The GraphQL con las directivas soportadas: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` no son soportadas._ - -**La directiva no puede ser utilizada en esta ubicación (#UniqueDirectivesPerLocationRule)** - -Los directivas soportados por The Graph solo se pueden usar una vez por ubicación. 
- -La siguiente es inválida (y redundante): - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` diff --git a/website/src/pages/ro/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/es/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/ro/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/es/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/es/subgraphs/_meta-titles.json b/website/src/pages/es/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/es/subgraphs/_meta-titles.json +++ b/website/src/pages/es/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/es/subgraphs/_meta.js b/website/src/pages/es/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/es/subgraphs/_meta.js +++ b/website/src/pages/es/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/es/subgraphs/best-practices/_meta.js b/website/src/pages/es/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/es/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/es/subgraphs/best-practices/avoid-eth-calls.mdx similarity index 89% rename from 
website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx rename to website/src/pages/es/subgraphs/best-practices/avoid-eth-calls.mdx index a0613bf2b69f..4b24fafac947 100644 --- a/website/src/pages/de/subgraphs/cookbook/avoid-eth-calls.mdx +++ b/website/src/pages/es/subgraphs/best-practices/avoid-eth-calls.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' --- ## TLDR @@ -103,14 +104,14 @@ You can significantly improve indexing performance by minimizing or eliminating ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/es/subgraphs/best-practices/derivedfrom.mdx similarity index 82% rename from website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx rename to website/src/pages/es/subgraphs/best-practices/derivedfrom.mdx index 22845a8d7dd2..344c906ffe55 100644 --- a/website/src/pages/ar/subgraphs/cookbook/derivedfrom.mdx +++ b/website/src/pages/es/subgraphs/best-practices/derivedfrom.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' --- ## TLDR @@ -62,7 +63,6 @@ Just by adding the `@derivedFrom` directive, this schema will only store the “ This will not only make our subgraph more efficient, but it will also unlock three features: 1. We can query the `Post` and see all of its comments. - 2. We can do a reverse lookup and query any `Comment` and see which post it comes from. 3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. @@ -75,14 +75,14 @@ For a more detailed explanation of strategies to avoid large arrays, check out K ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/es/subgraphs/best-practices/grafting-hotfix.mdx similarity index 92% rename from website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx rename to website/src/pages/es/subgraphs/best-practices/grafting-hotfix.mdx index a0bd3f4ab1c2..ae41a5ce20ba 100644 --- a/website/src/pages/pl/subgraphs/cookbook/grafting-hotfix.mdx +++ b/website/src/pages/es/subgraphs/best-practices/grafting-hotfix.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' --- ## TLDR @@ -173,14 +174,14 @@ By incorporating grafting into your subgraph development workflow, you can enhan ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/es/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx similarity index 87% rename from website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx rename to website/src/pages/es/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx index ed3d902cfad3..067f26ffacf7 100644 --- a/website/src/pages/es/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ b/website/src/pages/es/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' --- ## TLDR @@ -21,7 +22,7 @@ type Transfer @entity(immutable: true) { By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. 
+Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. ### Under the hood @@ -177,14 +178,14 @@ Read more about using Immutable Entities and Bytes as IDs in this blog post by D ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/cookbook/pruning.mdx b/website/src/pages/es/subgraphs/best-practices/pruning.mdx similarity index 78% rename from website/src/pages/es/subgraphs/cookbook/pruning.mdx rename to website/src/pages/es/subgraphs/best-practices/pruning.mdx index c6b1217db9a5..b620e504ab86 100644 --- a/website/src/pages/es/subgraphs/cookbook/pruning.mdx +++ b/website/src/pages/es/subgraphs/best-practices/pruning.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' --- ## TLDR @@ -42,14 +43,14 @@ Pruning using `indexerHints` is a best practice for subgraph development, offeri ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx b/website/src/pages/es/subgraphs/best-practices/timeseries.mdx similarity index 90% rename from website/src/pages/ko/subgraphs/cookbook/timeseries.mdx rename to website/src/pages/es/subgraphs/best-practices/timeseries.mdx index 0168be53d7ed..2c721a9cef23 100644 --- a/website/src/pages/ko/subgraphs/cookbook/timeseries.mdx +++ b/website/src/pages/es/subgraphs/best-practices/timeseries.mdx @@ -1,5 +1,6 @@ --- title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' --- ## TLDR @@ -181,14 +182,14 @@ By adopting this pattern, developers can build more efficient and scalable subgr ## Subgraph Best Practices 1-6 -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/cookbook/_meta.js b/website/src/pages/es/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/es/subgraphs/cookbook/_meta.js +++ b/website/src/pages/es/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index ddf69bb91735..000000000000 --- a/website/src/pages/es/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Descripción - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. 
- - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. 
- - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. 
-- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). 
It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Recursos Adicionales - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/cookbook/timeseries.mdx b/website/src/pages/es/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 1dd08ab764d1..000000000000 --- a/website/src/pages/es/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Descripción - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. 
-- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Ejemplo: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Ejemplo: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. - -Ejemplo: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. 
They enable aggregations based on specific criteria, such as a token in a financial application. - -Ejemplo: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. 
-- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/es/subgraphs/developing/deploying/_meta.js b/website/src/pages/es/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/es/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/es/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 0c7b9c4610e1..000000000000 --- a/website/src/pages/es/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Preguntas Frecuentes sobre Subgraph Studio ---- - -## 1. ¿Qué es Subgraph Studio? - -[Subgraph Studio](https://thegraph.com/studio/) es una dapp para crear, administrar y publicar subgrafos y claves API. - -## 2. ¿Cómo creo una clave API? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. ¿Puedo crear múltiples claves de API? - -¡Sí! Puedes crear varias claves de API para usar en diferentes proyectos. Consulta el enlace [aquí](https://thegraph.com/studio/apikeys/). - -## 4. ¿Cómo restrinjo un dominio para una clave API? - -Después de crear una clave de API, en la sección Seguridad, puedes definir los dominios que pueden consultar una clave de API específica. - -## 5. ¿Puedo transferir mi subgrafo a otro propietario? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. 
You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -Ten en cuenta que ya no podrás ver o editar el subgrafo en Studio una vez que haya sido transferido. - -## 6. ¿Cómo encuentro URLs de consulta para subgrafos si no soy el desarrollador del subgrafo que quiero usar? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -Recuerda que puedes crear una clave API y consultar cualquier subgrafo publicado en la red, incluso si tú mismo construyes un subgrafo. Estas consultas a través de la nueva clave API, son consultas pagadas como cualquier otra en la red. diff --git a/website/src/pages/es/subgraphs/developing/publishing/_meta.js b/website/src/pages/es/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/es/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/es/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/es/subgraphs/querying/_meta.js b/website/src/pages/es/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/es/subgraphs/querying/_meta.js +++ b/website/src/pages/es/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff 
--git a/website/src/pages/fr/resources/_meta-titles.json b/website/src/pages/fr/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/fr/resources/_meta-titles.json +++ b/website/src/pages/fr/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/fr/resources/_meta.js b/website/src/pages/fr/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/fr/resources/_meta.js +++ b/website/src/pages/fr/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/fr/resources/release-notes/_meta.js b/website/src/pages/fr/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/fr/resources/release-notes/_meta.js rename to website/src/pages/fr/resources/migration-guides/_meta.js diff --git a/website/src/pages/uk/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/fr/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 100% rename from website/src/pages/uk/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/fr/resources/migration-guides/assemblyscript-migration-guide.mdx diff --git a/website/src/pages/fr/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/fr/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/fr/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL 
Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. 
+ +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
+ +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. + +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index 49e76d908653..000000000000 --- a/website/src/pages/fr/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,523 +0,0 @@ ---- -title: Guide de migration de l'AssemblyScript ---- - -Jusqu'à présent, les subgraphs utilisaient l'une des [premières versions d'AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Enfin, nous avons ajouté la prise en charge du [le plus récent disponible](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) ! 🎉 - -Cela permettra aux développeurs de subgraph d'utiliser les nouvelles fonctionnalités du langage AS et de la bibliothèque standard. - -Ce guide s'applique à toute personne utilisant `graph-cli`/`graph-ts` sous la version `0.22.0`. Si vous utilisez déjà une version supérieure (ou égale), vous utilisez déjà la version `0.19.10` d'AssemblyScript 🙂 - -> Remarque : Depuis `0.24.0`, `graph-node` peut prendre en charge les deux versions, en fonction de la `apiVersion` spécifiée dans le manifeste du subgraph. - -## Fonctionnalités - -### Nouvelle fonctionnalité - -- Les `TypedArray` peuvent désormais être construits à partir de `ArrayBuffer` en utilisant les [nouvelle méthode statique `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nouvelles fonctions de bibliothèque standard: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Ajout de la prise en charge de x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Ajout de `StaticArray`, une variante de tableau plus efficace ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Ajout de `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implémentation de `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Ajout de la prise en charge des séparateurs dans les littéraux à virgule flottante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Ajout du support pour les fonctions de première classe 
([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Ajout des fonctions intégrées : `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementation de `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Ajout de la prise en charge des chaînes littérales de modèle ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Ajout de `encodeURI(Component)` et `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Ajout de `toString`, `toDateString` et `toTimeString` à `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Ajout de `toUTCString` pour `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Ajout du type intégré `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Optimizations - -- Les fonctions `Math` telles que `exp`, `exp2`, `log`, `log2` et `pow` ont été remplacées par des variantes plus rapides ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Légère optimisation de `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Mise en cache de plus d'accès aux champs dans std Map et Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimiser pour des puissances de deux `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Autre - -- Le type d'un littéral de tableau peut désormais être déduit de son contenu ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Stdlib mis à jour vers Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## 
Comment mettre à niveau ? - -1. Modifiez vos mappages `apiVersion` dans `subgraph.yaml` en `0.0.6` : - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. Mettez à jour le `graph-cli` que vous utilisez vers la version `dernière` en exécutant : - -```bash -# si vous l'avez installé globalement -npm install --global @graphprotocol/graph-cli@latest - -# ou dans votre subgraph si vous l'avez comme dépendance de développement -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Faites de même pour `graph-ts`, mais au lieu de l'installer globalement, enregistrez-le dans vos dépendances principales : - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Suivez le reste du guide pour corriger les changements de langue. -5. Exécutez `codegen` et `deploy` à nouveau. - -## Modifications radicales - -### Nullability - -Sur l'ancienne version d'AssemblyScript, vous pouviez créer du code comme celui-ci : - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Cependant, sur la version la plus récente, comme la valeur est nullable, vous devez vérifier, comme ceci : - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` n'est plus nul -} -``` - -Ou forcez-le comme ceci : - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -Si vous ne savez pas lequel choisir, nous vous recommandons de toujours utiliser la version sécurisée. Si la valeur n'existe pas, vous souhaiterez peut-être simplement effectuer une instruction if précoce avec un retour dans votre gestionnaire de subgraph. 
- -### Ombrage variable - -Avant de pouvoir faire de l'[observation de variables](https://en.wikipedia.org/wiki/Variable_shadowing) et un code comme celui-ci fonctionnerait : - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Cependant, cela n'est plus possible et le compilateur renvoie cette erreur : - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Vous devrez renommer vos variables en double si vous conservez une observation de variables. - -### Comparaisons nulles - -En effectuant la mise à niveau sur votre subgraph, vous pouvez parfois obtenir des erreurs comme celles-ci : - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Pour résoudre, vous pouvez simplement remplacer l'instruction `if` par quelque chose comme ceci : - -```typescript - if (!decimals) { - - // ou bien - - if (decimals === null) { -``` - -La même chose s'applique si vous faites != au lieu de ==. 
- -### Casting - -Auparavant, la manière courante de faire du casting consistait simplement à utiliser le mot-clé `as`, comme ceci : - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Cependant, cela ne fonctionne que dans deux scénarios : - -- Casting primitif (entre des types tels que `u8`, `i32`, `bool` ; par exemple : `let b : isize = 10 ; b as usize`); -- Upcasting sur l'héritage de classe (sous-classe → superclasse) - -Les Exemples: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -//upcasting lors de l'héritage de classe -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // équivalent à : bytes as Uint8Array -``` - -Il existe deux scénarios dans lesquels vous souhaiterez peut-être diffuser du contenu, mais l'utilisation de `as`/`var` **n'est pas sûre** : - -- Downcasting sur l'héritage de classe (superclasse → sous-classe) -- Entre deux types qui partagent une superclasse - -```typescript -//downcasting lors de l'héritage de classe -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // plante à l'exécution :( -``` - -```typescript -// entre deux types qui partagent une superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // plante à l'exécution :( -``` - -Dans ces cas-là, vous pouvez utiliser la fonction `changetype` : - -```typescript -//downcasting lors de l'héritage de classe -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // fonctionne :) -``` - -```typescript -// entre deux types qui partagent une superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // fonctionne :) -``` - -Si vous souhaitez simplement supprimer la nullité, vous pouvez 
continuer à utiliser l'opérateur `as` (ou `variable`), mais assurez-vous de savoir que la valeur ne peut pas être nulle. sinon ça va casser. - -```typescript -// supprimer la possibilité de valeur nulle (nullability) -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // suppression sûre de null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Pour le cas de nullité, nous vous recommandons de jeter un œil à la [fonctionnalité de vérification de la nullité](https://www.assemblyscript.org/basics.html#nullability-checks), cela rendra votre code plus propre 🙂 - -Nous avons également ajouté quelques méthodes statiques supplémentaires dans certains types pour faciliter la diffusion, à savoir : - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Vérification de nullité avec accès à la propriété - -Pour utiliser la [fonctionnalité de vérification de nullité](https://www.assemblyscript.org/basics.html#nullability-checks), vous pouvez utiliser soit les instructions `if`, soit l'opérateur ternaire (`?` et `:`) comme ce: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// ou - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Cependant, cela ne fonctionne que lorsque vous effectuez le `if` / ternaire sur une variable, pas sur un accès à une propriété, comme ceci : - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // ne compile pas -``` - -Ce qui génère cette erreur : - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Pour résoudre ce problème, vous pouvez créer une variable pour l'accès à cette propriété afin que le compilateur puisse effectuer la vérification magique de la nullité : - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compile sans problème :) -``` - -### Surcharge de l'opérateur avec accès à la propriété - -Si vous essayez de additionner (par exemple) un type nullable (à partir d'un accès à une propriété) avec un type non nullable, le compilateur AssemblyScript au lieu de donner une erreur de compilation avertissant que l'une des valeurs est nullable, il compile simplement silencieusement, donnant une chance pour que le code soit interrompu au moment de l'exécution. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // donne une erreur de compilation concernant la nullité -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // ne donne pas d'erreurs de compilation comme il se doit -``` - -Nous avons ouvert un problème sur le compilateur AssemblyScript pour cela, mais pour l'instant, si vous effectuez ce type d'opérations dans vos mappages de subgraph, vous devez les modifier pour effectuer une vérification nulle avant. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // maintenant `n` est garanti comme étant un BigInt -``` - -### Initialisation de valeur - -Si vous avez un code comme celui-ci : - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Il sera compilé mais s'arrêtera au moment de l'exécution, cela se produit parce que la valeur n'a pas été initialisée, alors assurez-vous que votre subgraph a initialisé ses valeurs, comme ceci : - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -De plus, si vous avez des propriétés nullables dans une entité GraphQL, comme ceci : - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -Et vous avez un code similaire à celui-ci : - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Vous devrez vous assurer d'initialiser la valeur `total.amount`, car si vous essayez d'accéder à la dernière ligne pour la somme, elle plantera. Donc soit vous l'initialisez d'abord : - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Ou vous pouvez simplement modifier votre schéma GraphQL pour ne pas utiliser de type nullable pour cette propriété, puis nous l'initialiserons à zéro à l'étape `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // initialise déjà les propriétés non-nullables -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Initialisation de la propriété de classe - -Si vous exportez des classes avec des propriétés qui sont d'autres classes (déclarées par vous ou par la bibliothèque standard), comme ceci : - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Le compilateur générera une erreur car vous devez soit ajouter un initialiseur pour les propriétés qui sont des classes, soit ajouter l'opérateur `!` : - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// ou - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// ou - -export class Something { - value!: Thing -} -``` - -### Initialisation du tableau - -La classe `Array` accepte toujours un nombre pour initialiser la longueur de la liste, mais vous devez faire attention car des opérations comme `.push` augmenteront en fait la taille au lieu de l'ajouter au début. 
, Par exemple: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // taille 6 :( -``` - -En fonction des types que vous utilisez, par exemple les types nullables, et de la manière dont vous y accédez, vous pourriez rencontrer une erreur d'exécution comme celle-ci : - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Pour réellement pousser au début, vous devez soit initialiser le `Array` avec une taille zéro, comme ceci : - -```typescript -let arr = new Array(0) // [] - -arr.push('quelque chose') // ["quelque chose"] -``` - -Ou vous devriez le muter via index : - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'quelque chose' // ["quelque chose", "", "", "", ""] -``` - -### Schéma GraphQL - -Il ne s'agit pas d'une modification directe d'AssemblyScript, mais vous devrez peut-être mettre à jour votre fichier `schema.graphql`. - -Vous ne pouvez désormais plus définir de champs dans vos types qui sont des listes non nullables. Si vous avez un schéma comme celui-ci : - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # n'est plus valide -} -``` - -Vous devrez ajouter un `!` au membre de type List, comme ceci : - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valide -} -``` - -Cela a changé en raison des différences de nullité entre les versions d'AssemblyScript et est lié au fichier `src/generated/schema.ts` (chemin par défaut, vous avez peut-être modifié cela). 
- -### Autre - -- Alignement de `Map#set` et `Set#add` avec la spécification, en retournant `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Les tableaux n'héritent plus d'ArrayBufferView, mais sont désormais distincts ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Les classes initialisées à partir de littéraux d'objet ne peuvent plus définir de constructeur ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Le résultat d'une opération binaire `**` est désormais le dénominateur commun entier si les deux opérandes sont des entiers. Auparavant, le résultat était un float comme si vous appeliez `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Convertir `NaN` en `false` lors de la conversion en `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Lors du décalage d'une petite valeur entière de type `i8`/`u8` ou `i16`/`u16`, seuls les 3 respectivement 4 les plus petits les bits significatifs de la valeur RHS affectent le résultat, de la même manière que le résultat d'un `i32.shl` n'est affecté que par les 5 bits les moins significatifs de la valeur RHS. 
Exemple : `someI8 << 8` produisait auparavant la valeur `0`, mais produit désormais `someI8` en raison du masquage du RHS comme `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Correction d'un bug des comparaisons de chaînes relationnelles lorsque les tailles diffèrent ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 62e5435c0fc3..000000000000 --- a/website/src/pages/fr/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,536 +0,0 @@ ---- -title: Guide de migration des validations GraphQL ---- - -Bientôt, `graph-node` supportera 100% de la couverture de la [Spécification des validations GraphQL] (https://spec.graphql.org/June2018/#sec-Validation). - -Les versions précédentes de `graph-node` ne prenaient pas en charge toutes les validations et fournissaient des réponses plus gracieuses - ainsi, en cas d'ambiguïté, `graph-node` ignorait les composants d'opérations GraphQL non valides. - -La prise en charge de GraphQL Validations est le pilier des nouvelles fonctionnalités à venir et des performances à grande échelle de The Graph Network. - -Il garantira également le déterminisme des réponses aux requêtes, une exigence clé sur The Graph Network. - -**L'activation des validations GraphQL interrompra certaines requêtes existantes** envoyées à l'API Graph. - -Pour être conforme à ces validations, veuillez suivre le guide de migration. - -> ⚠️ Si vous ne migrez pas vos requêtes avant le déploiement des validations, elles renverront des erreurs et éventuellement casseront vos frontends/clients. 
- -## Guide de migration - -Vous pouvez utiliser l'outil de migration CLI pour rechercher tous les problèmes dans vos opérations GraphQL et les résoudre. Vous pouvez également mettre à jour le point de terminaison de votre client GraphQL pour utiliser le point de terminaison « https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME ». Tester vos requêtes sur ce point de terminaison vous aidera à trouver les problèmes dans vos requêtes. - -> Tous les subgraphs n'auront pas besoin d'être migrés si vous utilisez [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) ou [GraphQL Code Generator](https://the-guild.dev /graphql/codegen), ils garantissent déjà que vos requêtes sont valides. - -## Outil CLI de migration - -**La plupart des erreurs d'opérations GraphQL peuvent être trouvées à l'avance dans votre base de code.** - -Pour cette raison, nous offrons une expérience fluide pour valider vos opérations GraphQL pendant le développement ou dans CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) est un outil CLI simple qui permet de valider les opérations GraphQL par rapport à un schéma donné. - -### **Commencer** - -Vous pouvez exécuter l'outil comme suit : - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Définissez ou remplacez $GITHUB_USER, $SUBGRAPH_NAME par les valeurs appropriées. Comme : [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- L'URL du schéma d'aperçu (https://api-next.thegraph.com/) fournie est fortement limitée en débit et sera supprimée une fois que tous les utilisateurs auront migré vers la nouvelle version. 
**Ne l'utilisez pas en production.** -- Les opérations sont identifiées dans les fichiers avec les extensions suivantes [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (option `-o`). - -### Sortie CLI - -L'outil CLI `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` affichera toutes les erreurs d'opérations GraphQL comme suit : - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -Pour chaque erreur, vous trouverez une description, le chemin et la position du fichier, ainsi qu'un lien vers un exemple de solution (voir la section suivante). - -## Exécutez vos requêtes locales sur le schéma d'aperçu - -Nous fournissons un point de terminaison « https://api-next.thegraph.com/ » qui exécute une version « graph-node » dont les validations sont activées. - -Vous pouvez tester des requêtes en les envoyant à : - -- `https://api-next.thegraph.com/subgraphs/id/` - -ou bien - -- `https://api-next.thegraph.com/subgraphs/name//` - -Pour travailler sur des requêtes signalées comme comportant des erreurs de validation, vous pouvez utiliser votre outil de requête GraphQL préféré, comme Altair ou [GraphiQL](https://cloud.hasura.io/public/graphiql), et essayer votre requête. Ces outils marqueront également ces erreurs dans leur interface utilisateur, avant même que vous ne l'exécutiez. - -## Comment résoudre les problèmes - -Ci-dessous, vous trouverez toutes les erreurs de validation GraphQL qui pourraient survenir sur vos opérations GraphQL existantes. - -### Les variables, opérations, fragments ou arguments GraphQL doivent être uniques - -Nous avons appliqué des règles pour garantir qu'une opération inclut un ensemble unique de variables, d'opérations, de fragments et d'arguments GraphQL. - -Une opération GraphQL n'est valide que si elle ne contient aucune ambiguïté. 
- -Pour y parvenir, nous devons nous assurer que certains composants de votre opération GraphQL doivent être uniques. - -Voici un exemple de quelques opérations non valides qui enfreignent ces règles : - -**Nom de requête en double (#UniqueOperationNamesRule)** - -```graphql -# L'opération suivante a violé UniqueOperationName -# règle, puisque nous avons une seule opération avec 2 requêtes -# avec le même nom -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # renommer la deuxième requête - name -} -``` - -**Nom de fragment en double (#UniqueFragmentNamesRule)** - -```graphql -# L'opération suivante a violé la règle UniqueFragmentName -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assigner un nom unique au fragment - metadata -} - -fragment MyFieldsName { # assigner un nom unique au fragment - nom -} -``` - -**Variable en double (#UniqueVariableNamesRule)** - -```graphql -# L'opération suivante viole le UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # conserver la variable pertinente (ici : `$id: String`) - id - ...MyFields -} -``` - -**Nom d'argument en double (#UniqueArgument)** - -```graphql -# L'opération suivante a violé les UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Requête anonyme en double (#LoneAnonymousOperationRule)** - -De plus, l'utilisation de deux opérations anonymes violera la règle « LoneAnonymousOperation » en raison d'un conflit dans la structure de réponse : - -```graphql -# Cela échouera s'il est exécuté ensemble dans -# une seule opération avec les deux requêtes suivantes : -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Ou nommez les deux requêtes : - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Chevauchement des domaines - -Un jeu de sélection GraphQL n'est considéré comme valide que s'il résout correctement l'éventuel jeu de résultats. - -Si un ensemble de sélection spécifique, ou un champ, crée une ambiguïté soit par le champ sélectionné, soit par les arguments utilisés, le service GraphQL ne parviendra pas à valider l'opération. - -Voici quelques exemples d'opérations non valides qui enfreignent cette règle : - -**Conflit d'alias de champs (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# L'alias des champs peut provoquer des conflits, soit avec -# d'autres alias ou d'autres champs qui existent sur le -# Schéma GraphQL. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - nickname: name - originalName: name # alias du champ `name` original - } -} -``` - -\*_Champs en conflit avec des arguments (#OverlappingFieldsCanBeMergedRule)_ - -```graphql -# Différents arguments peuvent conduire à des données différentes, -# donc nous ne pouvons pas supposer que les champs seront les mêmes. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -De plus, dans des cas d'utilisation plus complexes, vous pourriez enfreindre cette règle en utilisant deux fragments susceptibles de provoquer un conflit dans l'ensemble finalement attendu : - -```graphql -query { - # Finalement, nous avons deux définitions de "x", pointant - # vers des champs différents ! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -En plus de cela, les directives GraphQL côté client comme `@skip` et `@include` peuvent conduire à une ambiguïté, par exemple : - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -``` - -[Vous pouvez en savoir plus sur l'algorithme ici.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Variables ou fragments inutilisés - -Une opération GraphQL n'est également considérée comme valide que si tous les composants définis par l'opération (variables, fragments) sont utilisés. - -Voici quelques exemples d'opérations GraphQL qui enfreignent ces règles : - -**Variable inutilisée** (#NoUnusedVariablesRule) - -```graphql -# Invalide, car $someVar n'est jamais utilisé. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Fragment inutilisé** (#NoUnusedFragmentsRule) - -```graphql -# Invalide, car le fragment AllFields n'est jamais utilisé. -query something { - someData -} - -fragment AllFields { # inutilisé :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalide, car le fragment AllFields n'est jamais utilisé. 
-query something { - someData -} - -# retirer le fragment `AllFields` -``` - -### Ensemble de sélection invalide ou manquant (#ScalarLeafsRule) - -De plus, une sélection de champ GraphQL n'est valide que si les éléments suivants sont validés : - -- Un champ d'objet doit avoir un ensemble de sélection spécifié. -- Un champ de bord (scalaire, énumération) ne doit pas avoir de jeu de sélection spécifié. - -Voici quelques exemples de violations de ces règles avec le schéma suivant : - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Ensemble de sélection invalide** - -```graphql -query { - user { - id { # Invalide, car "id" est de type ID et n'a pas de sous-champs - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Ensemble de sélection manquant** - -```graphql -query { - user { - id - image # `image` nécessite un ensemble de sélection pour les sous-champs! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Valeurs d'arguments incorrectes (#VariablesInAllowedPositionRule) - -Les opérations GraphQL qui transmettent des valeurs codées en dur aux arguments doivent être valides, en fonction de la valeur définie dans le schéma. - -Voici quelques exemples d'opérations non valides qui enfreignent ces règles : - -```graphql -query purposes { - # Si "name" est défini comme "String" dans le schéma, - # cette requête échouera lors de la validation. - purpose(name: 1) { - id - } -} - -# Cela pourrait également se produire lorsqu'une variable incorrecte est définie : - -query purposes($name: Int!) 
{ - # Si "name" est défini comme `String` dans le schéma, - # cette requête échouera lors de la validation, car la - # variable utilisée est de type `Int` - purpose(name: $name) { - id - } -} -``` - -### Type, Variable, Fragment ou Directive connu (#UnknownX) - -L'API GraphQL générera une erreur si un type, une variable, un fragment ou une directive inconnu est utilisé. - -Ces références inconnues doivent être corrigées : - -- renommer si c'était une faute de frappe -- sinon, supprimez - -### Fragment : diffusion ou définition non valide - -**Propagation de fragments non valide (#PossibleFragmentSpreadsRule)** - -Un Fragment ne peut pas être réparti sur un type non applicable. - -Exemple, nous ne pouvons pas appliquer un fragment `Cat` au type `Dog` : - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Définition de fragment non valide (#FragmentsOnCompositeTypesRule)** - -Tout Fragment doit être défini sur (en utilisant `on ...`) un type composite, en bref : objet, interface ou union. - -Les exemples suivants ne sont pas valides, car la définition de fragments sur des scalaires n'est pas valide. - -```graphql -fragment fragOnScalar on Int { - # nous ne pouvons pas définir un fragment sur un scalaire (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` n'est pas un sous-type de `Dog` - somethingElse - } -} -``` - -### Utilisation des directives - -**La directive ne peut pas être utilisée à cet emplacement (#KnownDirectivesRule)** - -Seules les directives GraphQL (« @... ») prises en charge par l'API Graph peuvent être utilisées. 
- -Voici un exemple avec les directives prises en charge par GraphQL : - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Remarque : `@stream`, `@live`, `@defer` ne sont pas pris en charge._ - -**La directive ne peut être utilisée qu'une seule fois à cet emplacement (#UniqueDirectivesPerLocationRule)** - -Les directives prises en charge par The Graph ne peuvent être utilisées qu'une seule fois par emplacement. - -Ce qui suit n'est pas valide (et redondant) : - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/uk/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/fr/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/uk/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/fr/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/fr/subgraphs/_meta-titles.json b/website/src/pages/fr/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/fr/subgraphs/_meta-titles.json +++ b/website/src/pages/fr/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/fr/subgraphs/_meta.js b/website/src/pages/fr/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/fr/subgraphs/_meta.js +++ b/website/src/pages/fr/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/fr/subgraphs/best-practices/_meta.js b/website/src/pages/fr/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + 
pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/fr/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/fr/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. 
For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write
+
+```yaml
+event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed)
+handler: handleTransferWithPool
+calls:
+  ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to)
+```
+
+The `ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to)` line is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`.
+
+The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call.
+
+Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0.
+
+## Conclusion
+
+You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+
+3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/)
+
+4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/)
+
+5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/)
+
+6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/fr/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/fr/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/fr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/best-practices/pruning.mdx b/website/src/pages/fr/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0.
+- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain.
+- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired.
+
+We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`:
+
+```yaml
+specVersion: 1.0.0
+schema:
+  file: ./schema.graphql
+indexerHints:
+  prune: auto
+dataSources:
+  - kind: ethereum/contract
+    name: Contract
+    network: mainnet
+```
+
+## Important Considerations
+
+- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data.
+
+- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months).
+
+## Conclusion
+
+Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+
+3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/best-practices/timeseries.mdx b/website/src/pages/fr/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/fr/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+
+### Examples of Aggregation Expressions
+
+- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount")
+- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
+- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
+
+Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc.
+
+### Query Parameters
+
+- interval: Specifies the time interval (e.g., "hour").
+- where: Filters based on dimensions and timestamp ranges.
+- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch).
+
+### Notes
+
+- Sorting: Results are automatically sorted by timestamp and id in descending order.
+- Current Data: An optional current argument can include the current, partially filled interval.
+
+### Conclusion
+
+Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach:
+
+- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead.
+- Simplifies Development: Eliminates the need for manual aggregation logic in mappings.
+- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness.
+
+By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+
+3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/cookbook/_meta.js b/website/src/pages/fr/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/fr/subgraphs/cookbook/_meta.js +++ b/website/src/pages/fr/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 57cd57c1250f..000000000000 --- a/website/src/pages/fr/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Aperçu - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. 
**Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. 
- > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. 
**New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. 
- -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. 
- -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Ressources additionnelles - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 82176f96bdfd..000000000000 --- a/website/src/pages/fr/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. 
- -## Aperçu - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. 
- -L'exemple: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -L'exemple: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. - -L'exemple: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -L'exemple: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! 
@aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. 
- -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/fr/subgraphs/developing/deploying/_meta.js b/website/src/pages/fr/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/fr/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/fr/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 1ace101654f2..000000000000 --- a/website/src/pages/fr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Subgraph Studio FAQ ---- - -## 1. 
Qu'est-ce que Subgraph Studio ? - -[Subgraph Studio](https://thegraph.com/studio/) est une application pour créer, gérer et publier des subgraphs et des clés API. - -## 2. Comment créer une clé API ? - -Pour créer une API, allez dans Subgraph Studio et connectez votre portefeuille. Vous pourrez cliquer sur l'onglet des clés API en haut. Là, vous pourrez créer une clé API. - -## 3. Puis-je créer plusieurs clés API ? - -Oui ! Vous pouvez créer plusieurs clés API pour les utiliser dans différents projets. Consultez le lien [ici](https://thegraph.com/studio/apikeys/). - -## 4. Comment limiter un domaine pour une clé API ? ? - -Après avoir créé une clé API, dans la section Sécurité, vous pouvez définir les domaines qui peuvent interroger une clé API spécifique. - -## Puis-je transférer mon subgraph à un autre propriétaire ? - -Oui, les subgraphs qui ont été publiés sur Arbitrum One peuvent être transférés vers un nouveau portefeuille ou un Multisig. Vous pouvez le faire en cliquant sur les trois points à côté du bouton 'Publish' sur la page des détails du subgraph et en sélectionnant 'Transfer ownership'. - -Notez que vous ne pourrez plus voir ou modifier le subgraph dans Studio une fois qu'il aura été transféré. - -## Comment trouver les URL de requête pour les sugraphs si je ne suis pas le développeur du subgraph que je veux utiliser ? - -Vous pouvez trouver l'URL de requête de chaque subgraph dans la section Subgraph Details de Graph Explorer. Lorsque vous cliquez sur le bouton "Query", vous serez dirigé vers un volet où vous pourrez voir l'URL de requête du subgraph qui vous intéresse. Vous pouvez ensuite remplacer le placeholder `` par la clé API que vous souhaitez utiliser dans Subgraph Studio. - -N'oubliez pas que vous pouvez créer une clé API et interroger n'importe quel subgraph publié sur le réseau, même si vous créez vous-même un subgraph. Ces requêtes via la nouvelle clé API, sont des requêtes payantes comme n'importe quelle autre sur le réseau. 
diff --git a/website/src/pages/fr/subgraphs/developing/publishing/_meta.js b/website/src/pages/fr/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/fr/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/fr/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/fr/subgraphs/querying/_meta.js b/website/src/pages/fr/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/fr/subgraphs/querying/_meta.js +++ b/website/src/pages/fr/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/hi/resources/_meta-titles.json b/website/src/pages/hi/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/hi/resources/_meta-titles.json +++ b/website/src/pages/hi/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/hi/resources/_meta.js b/website/src/pages/hi/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/hi/resources/_meta.js +++ b/website/src/pages/hi/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/hi/resources/release-notes/_meta.js 
b/website/src/pages/hi/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/hi/resources/release-notes/_meta.js rename to website/src/pages/hi/resources/migration-guides/_meta.js diff --git a/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/hi/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 99% rename from website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/hi/resources/migration-guides/assemblyscript-migration-guide.mdx index a8bb2e376807..85f6903a6c69 100644 --- a/website/src/pages/tr/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/hi/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -167,7 +167,7 @@ However this only works in two scenarios: - Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting on class inheritance (subclass → superclass) -Örnekler: +Examples: ```typescript // primitive casting diff --git a/website/src/pages/hi/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/hi/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/hi/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. 
+ +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. 
+The following is invalid (and redundant):
+
+```graphql
+query {
+  dog {
+    name @include(if: true) @include(if: true)
+  }
+}
+```
8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- नई standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` और `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### अनुकूलन - -- `Math` 
फंक्शन जैसे `exp`, `exp2`, `log`, `log2` और ` pow` को तेज़ वेरिएंट से बदल दिया गया है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` को थोड़ा अनुकूलित करें ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- एसटीडी मानचित्र और सेट ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) में कैश अधिक फ़ील्ड एक्सेस करता है -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### अन्य - -- किसी ऐरे लिटरल के प्रकार का अनुमान अब इसकी सामग्री से लगाया जा सकता है ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- यूनिकोड 13.0.0 में अपडेट किया गया stdlib ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## कैसे करें अपग्रेड? - -1. अपनी मैपिंग `apiVersion` को `subgraph.yaml` में `0.0.6` में बदलें: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. आप जिस `graph-cli` का उपयोग कर रहे हैं उसे `नवीनतम` संस्करण में चलाकर अपडेट करें: - -```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest - -# or in your subgraph if you have it as a dev dependency -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. `graph-ts` के लिए भी ऐसा ही करें, लेकिन विश्व स्तर पर स्थापित करने के बजाय, इसे अपनी मुख्य निर्भरताओं में सहेजें: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. भाषा संबंधी परिवर्तनों को ठीक करने के लिए शेष मार्गदर्शिका का पालन करें। -5. `codegen` चलाएँ और `deploy` फिर से करें। - -## ब्रेकिंग परिवर्तन - -### Nullability - -असेंबलीस्क्रिप्ट के पुराने संस्करण पर, आप इस तरह कोड बना सकते हैं: - -```typescript -function load(): Value | null { ... 
} - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -हालाँकि नए संस्करण पर, क्योंकि मान अशक्त है, इसके लिए आपको जाँच करने की आवश्यकता है, जैसे: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -या इसे इस तरह मजबूर करें: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -यदि आप अनिश्चित हैं कि किसे चुनना है, तो हम हमेशा सुरक्षित संस्करण का उपयोग करने की सलाह देते हैं। यदि मान मौजूद नहीं है, तो आप अपने सबग्राफ हैंडलर में वापसी के साथ एक शुरुआती if स्टेटमेंट करना चाहते हैं। - -### Variable Shadowing - -इससे पहले कि आप [वैरिएबल शैडोइंग](https://en.wikipedia.org/wiki/Variable_shadowing) कर पाते और इस तरह का कोड काम करेगा: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -हालाँकि अब यह संभव नहीं है, और संकलक इस त्रुटि को लौटाता है: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -यदि आपके पास वेरिएबल शैडोइंग है, तो आपको अपने डुप्लिकेट वेरिएबल्स का नाम बदलने की आवश्यकता होगी। - -### Null Comparisons - -अपने सबग्राफ पर अपग्रेड करने से, कभी-कभी आपको इस तरह की त्रुटियाँ मिल सकती हैं: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
- if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -हल करने के लिए आप केवल `if` कथन को कुछ इस तरह से बदल सकते हैं: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -यदि आप == के बजाय != कर रहे हैं तो वही लागू होता है। - -### कास्टिंग - -पहले कास्टिंग करने का सामान्य तरीका केवल `as` कीवर्ड का उपयोग करना था, जैसे: - -```typescript -चलो byteArray = नया ByteArray (10) -चलो uint8Array = byteArray Uint8Array के रूप में // इसके बराबर: byteArray -``` - -However this only works in two scenarios: - -- प्रिमिटिव कास्टिंग (`u8`, `i32`, `bool` जैसे प्रकारों के बीच; उदाहरण: `let b: isize = 10; b as use`); -- क्लास इनहेरिटेंस (सबक्लास → सुपरक्लास) पर अपकास्टिंग - -उदाहरण: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -ऐसे दो परिदृश्य हैं जहां आप कास्ट करना चाहते हैं, लेकिन `as``var` **का उपयोग करना सुरक्षित नहीं है **: - -- क्लास इनहेरिटेंस (सुपरक्लास → सबक्लास) पर डाउनकास्टिंग -- एक सुपरक्लास साझा करने वाले दो प्रकारों के बीच - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -उन मामलों के लिए, आप `changetype` फ़ंक्शन का उपयोग कर सकते हैं: - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) 
-changetype(bytes) // works :) -``` - -यदि आप केवल अशक्तता को हटाना चाहते हैं, तो आप `as` ऑपरेटर (या `variable`) का उपयोग करना जारी रख सकते हैं, लेकिन सुनिश्चित करें कि आप जानते हैं कि मान शून्य नहीं हो सकता, नहीं तो टूट जाएगा। - -```typescript -// remove nullability -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -अशक्तता मामले के लिए हम [अशक्तता जांच सुविधा](https://www.assemblyscript.org/basics.html#nullability-checks) पर एक नज़र डालने की सलाह देते हैं, यह आपके कोड को साफ कर देगा 🙂 - -साथ ही, हमने कास्टिंग को आसान बनाने के लिए कुछ प्रकारों में कुछ और स्थैतिक विधियाँ जोड़ी हैं, वे हैं: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Nullability check with property access - -[nullability-checks">nullability check सुविधा](https://www.assemblyscript.org/basics.html#nullability-checks) का उपयोग करने के लिए आप या तो `if` स्टेटमेंट या टर्नरी का उपयोग कर सकते हैं ऑपरेटर (`?` और `:`) इस तरह: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -हालांकि यह केवल तभी काम करता है जब आप `if` / ternary एक चर पर कर रहे हों, संपत्ति के उपयोग पर नहीं, जैसे: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -जो इस त्रुटि को आउटपुट करता है: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; -``` - -इस समस्या को ठीक करने के लिए, आप उस प्रॉपर्टी एक्सेस के लिए एक वेरिएबल बना सकते हैं ताकि कंपाइलर अशक्तता जांच जादू कर सके: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### प्रॉपर्टी एक्सेस के साथ ऑपरेटर ओवरलोडिंग - -यदि आप एक अशक्त प्रकार के साथ एक अशक्त प्रकार (एक संपत्ति के उपयोग से) का योग करने की कोशिश करते हैं, तो असेंबलीस्क्रिप्ट कंपाइलर एक संकलन समय त्रुटि चेतावनी देने के बजाय यह चेतावनी देता है कि मानों में से एक अशक्त है, यह सिर्फ चुपचाप संकलित करता है, मौका देता है कोड रनटाइम पर टूटने के लिए। - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -हमने इसके लिए असेंबलीस्क्रिप्ट कंपाइलर पर एक मुद्दा खोला है, लेकिन अभी के लिए यदि आप अपने सबग्राफ मैपिंग में इस तरह के ऑपरेशन करते हैं, तो आपको इससे पहले एक अशक्त जांच करने के लिए उन्हें बदलना चाहिए। - -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### मूल्य आरंभीकरण - -यदि आपके पास ऐसा कोई कोड है: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -यह संकलित होगा लेकिन रनटाइम पर टूट जाएगा, ऐसा इसलिए होता है क्योंकि मान प्रारंभ नहीं किया गया है, इसलिए सुनिश्चित करें कि आपके सबग्राफ ने उनके मानों को प्रारंभ किया है, जैसे: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -इसके अलावा, यदि आपके पास एक ग्राफक्यूएल इकाई में अशक्त गुण 
हैं, जैसे: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -और आपके पास इसके समान कोड है: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -आपको `total.amount` मान को इनिशियलाइज़ करना सुनिश्चित करना होगा, क्योंकि यदि आप राशि के लिए अंतिम पंक्ति की तरह एक्सेस करने का प्रयास करते हैं, तो यह क्रैश हो जाएगा। तो आप या तो इसे पहले इनिशियलाइज़ करें: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -या आप इस संपत्ति के लिए एक अशक्त प्रकार का उपयोग नहीं करने के लिए अपनी ग्राफक्यूएल स्कीमा को बदल सकते हैं, फिर हम इसे `कोडजेन` चरण 😉 पर शून्य के रूप में आरंभ करेंगे - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! -} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### क्लास प्रॉपर्टी इनिशियलाइज़ेशन - -यदि आप किसी भी वर्ग को गुणों के साथ निर्यात करते हैं जो अन्य वर्ग हैं (आपके द्वारा या मानक पुस्तकालय द्वारा घोषित) इस तरह: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -कंपाइलर त्रुटि करेगा क्योंकि आपको या तो उन संपत्तियों के लिए एक इनिशियलाइज़र जोड़ने की आवश्यकता है जो कक्षाएं हैं, या `!` ऑपरेटर जोड़ें: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### सरणी आरंभीकरण - -अनियमित आरंभिक `Array` वर्ग अभी भी सूची की लंबाई को प्रारंभ करने के लिए एक संख्या स्वीकार करता है, हालांकि आपको ध्यान रखना चाहिए क्योंकि `.push` जैसे संचालन वास्तव में आकार में जोड़ने 
के बजाय बढ़ाएंगे शुरुआत, उदाहरण के लिए:रण - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -आपके द्वारा उपयोग किए जा रहे प्रकारों के आधार पर, उदाहरण के लिए अशक्त वाले, और आप उन्हें कैसे एक्सेस कर रहे हैं, आपको इस तरह की रनटाइम त्रुटि का सामना करना पड़ सकता है: - -``` -निष्पादन विफलता के कारण ERRO हैंडलर को छोड़ दिया गया, त्रुटि: ~lib/array.ts, पंक्ति 110, कॉलम 40 पर संदेश के साथ निरस्त किया गया: संदेश के साथ तत्व प्रकार अशक्त होना चाहिए यदि सरणी छिद्रपूर्ण वासम बैकट्रेस है: 0: 0x19c4 - <अज्ञात>! ~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/ @graphprotocol/graph-ts/वैश्विक/वैश्विक/id_of_type -``` - -वास्तव में शुरुआत में पुश करने के लिए, आपको `Array` आकार शून्य के साथ प्रारंभ करना चाहिए, जैसे: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -या आपको इसे इंडेक्स के माध्यम से बदलना चाहिए: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### ग्राफक्यूएल स्कीमा - -यह सीधे असेंबलीस्क्रिप्ट परिवर्तन नहीं है, लेकिन आपको अपनी `schema.graphql` फ़ाइल को अपडेट करना पड़ सकता है। - -अब आप अपने प्रकारों में उन क्षेत्रों को परिभाषित नहीं कर सकते हैं जो गैर-शून्य सूची हैं। यदि आपके पास ऐसा स्कीमा है: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -आपको सूची प्रकार के सदस्य में एक `!` जोड़ना होगा, जैसे: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! 
# valid -} -``` - -असेंबलीस्क्रिप्ट संस्करणों के बीच अशक्तता के अंतर के कारण यह बदल गया, और यह `src/generated/schema.ts` फ़ाइल से संबंधित है (डिफ़ॉल्ट पथ, आपने इसे बदल दिया होगा)। - -### अन्य - -- विनिर्देश के साथ `Map#set` और `Set#add` संरेखित, `यह` लौटाता है ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- ऑब्जेक्ट लिटरल से प्रारंभ की गई कक्षाएं अब कंस्ट्रक्टर को परिभाषित नहीं कर सकती हैं ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- यदि दोनों ऑपरेंड पूर्णांक हैं, तो `**` बाइनरी ऑपरेशन का परिणाम अब सामान्य भाजक पूर्णांक है। पहले, परिणाम एक फ़्लोट था जैसे `Math/f.pow` ([v0 0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) को कॉल कर रहा हो.11. -- `बूल` पर कास्ट करते समय `NaN` को `गलत` पर जोर दें ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- `i8`/`u8` या `i16`/`u16` प्रकार के छोटे पूर्णांक मान को स्थानांतरित करते समय, केवल 3 क्रमशः 4 न्यूनतम RHS मान के महत्वपूर्ण बिट परिणाम को प्रभावित करते हैं, जो कि `i32.shl` के परिणाम के अनुरूप होता है, केवल RHS मान के 5 सबसे कम महत्वपूर्ण बिट्स से प्रभावित होता है। उदाहरण: `someI8 << 8` ने पहले `0` मान उत्पन्न किया था, लेकिन अब RHS को `8 & 7 = 0` (3 बिट) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- आकार भिन्न होने पर संबंधपरक स्ट्रिंग तुलनाओं का बग समाधान ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/hi/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/hi/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 71a47e6e2ac3..000000000000 --- a/website/src/pages/hi/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,539 +0,0 @@ ---- 
-title: ग्राफक्यूएल सत्यापन माइग्रेशन गाइड ---- - -जल्द ही `ग्राफ़-नोड` [ग्राफ़क्यूएल सत्यापन विनिर्देश] (https://spec.graphql.org/June2018/#sec-Validation) के 100% कवरेज का समर्थन करेगा। - -`ग्राफ़-नोड` के पिछले संस्करण सभी सत्यापनों का समर्थन नहीं करते थे और अधिक सुंदर प्रतिक्रियाएँ प्रदान करते थे - इसलिए, अस्पष्टता के मामलों में, `ग्राफ़-नोड` अमान्य ग्राफ़क्यूएल संचालन घटकों की अनदेखी कर रहा था। - -ग्राफक्यूएल सत्यापन समर्थन आगामी नई सुविधाओं और ग्राफ नेटवर्क के पैमाने पर प्रदर्शन के लिए स्तंभ है। - -यह क्वेरी प्रतिक्रियाओं के निर्धारण को भी सुनिश्चित करेगा, जो कि ग्राफ नेटवर्क पर एक प्रमुख आवश्यकता है। - -**GraphQL Validations को सक्षम करने से कुछ मौजूदा क्वेरीज़ टूट जाएंगी** जो ग्राफ़ API को भेजी गई हैं। - -उन मान्यताओं का अनुपालन करने के लिए, कृपया माइग्रेशन गाइड का पालन करें। - -> ⚠️ यदि आप सत्यापन शुरू होने से पहले अपने प्रश्नों को माइग्रेट नहीं करते हैं, तो वे त्रुटियां लौटाएंगे और संभवत: आपके फ़्रंटएंड/क्लाइंट को तोड़ देंगे। - -## प्रवासन गाइड - -आप अपने ग्राफक्यूएल संचालन में किसी भी समस्या का पता लगाने और उन्हें ठीक करने के लिए सीएलआई माइग्रेशन टूल का उपयोग कर सकते हैं। वैकल्पिक रूप से आप `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` समापन बिंदु का उपयोग करने के लिए अपने ग्राफ़िकल क्लाइंट के समापन बिंदु को अपडेट कर सकते हैं। इस समापन बिंदु के विरुद्ध अपने प्रश्नों का परीक्षण करने से आपको अपने प्रश्नों में समस्याओं का पता लगाने में मदद मिलेगी। - -> अगर आप [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) या [GraphQL Code Generator](https://the-guild.dev) का इस्तेमाल कर रहे हैं, तो सभी सबग्राफ को माइग्रेट करने की ज़रूरत नहीं है /graphql/codegen), वे पहले से ही सुनिश्चित करते हैं कि आपके प्रश्न मान्य हैं। - -## माइग्रेशन सीएलआई टूल - -**ज्यादातर ग्राफक्यूएल संचालन त्रुटियां समय से पहले आपके कोडबेस में पाई जा सकती हैं।** - -इस कारण से, हम विकास के दौरान या सीआई में आपके ग्राफक्यूएल संचालन को मान्य करने के लिए एक सहज अनुभव प्रदान करते हैं। - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) एक सरल 
सीएलआई उपकरण है जो किसी दिए गए स्कीमा के खिलाफ ग्राफक्यूएल संचालन को मान्य करने में मदद करता है। - -### शुरू करना - -आप टूल को निम्नानुसार चला सकते हैं: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**टिप्पणियाँ:** - -- $GITHUB_USER, $SUBGRAPH_NAME को उपयुक्त मानों के साथ सेट करें या बदलें। जैसे: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- प्रदान किया गया पूर्वावलोकन स्कीमा URL (https://api-next.thegraph.com/) अत्यधिक दर-सीमित है और एक बार सभी उपयोगकर्ताओं के नए संस्करण में माइग्रेट हो जाने के बाद समाप्त हो जाएगा। **उत्पादन में इसका उपयोग न करें।** -- निम्नलिखित एक्सटेंशन वाली फाइलों में संचालन की पहचान की जाती है [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` विकल्प)। - -### सीएलआई आउटपुट - -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` सीएलआई टूल किसी भी ग्राफक्यूएल संचालन त्रुटियों को निम्नानुसार आउटपुट करेगा: - -![सीएलआई से त्रुटि आउटपुट](https://i.imgur.com/x1cBdhq.png) - -प्रत्येक त्रुटि के लिए, आपको विवरण, फ़ाइल पथ और स्थिति, और समाधान उदाहरण के लिए एक लिंक मिलेगा (निम्न अनुभाग देखें)। - -## अपने स्थानीय प्रश्नों को पूर्वावलोकन स्कीमा के विरुद्ध चलाएँ - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. 
- -आप उन्हें भेजकर प्रश्नों को आज़मा सकते हैं: - -- `https://api-next.thegraph.com/subgraphs/id/` - -या - -- `https://api-next.thegraph.com/subgraphs/name//` - -उन प्रश्नों पर काम करने के लिए जिन्हें सत्यापन त्रुटियों के रूप में फ़्लैग किया गया है, आप Altair या [GraphiQL](https://cloud.hasura.io/public/graphiql) जैसे अपने पसंदीदा ग्राफ़क्यूएल क्वेरी टूल का उपयोग कर सकते हैं, और अपनी क्वेरी को आज़मा सकते हैं। वे उपकरण आपके चलाने से पहले ही उन त्रुटियों को उनके UI में चिह्नित कर देंगे। - -## मुद्दों को कैसे हल करें - -नीचे, आपको वे सभी ग्राफ़िकल सत्यापन त्रुटियां मिलेंगी जो आपके मौजूदा ग्राफ़िकल ऑपरेशंस पर हो सकती हैं। - -### ग्राफक्यूएल चर, संचालन, टुकड़े या तर्क अद्वितीय होने चाहिए - -हमने यह सुनिश्चित करने के लिए नियम लागू किए हैं कि एक ऑपरेशन में ग्राफक्यूएल चर, संचालन, टुकड़े और तर्कों का एक अनूठा सेट शामिल है। - -एक ग्राफकॉल ऑपरेशन तभी मान्य होता है जब उसमें कोई अस्पष्टता न हो। - -इसे प्राप्त करने के लिए, हमें यह सुनिश्चित करने की आवश्यकता है कि आपके ग्राफकॉल ऑपरेशन में कुछ घटक अद्वितीय होने चाहिए। - -इन नियमों का उल्लंघन करने वाली कुछ अमान्य कार्रवाइयों का उदाहरण यहां दिया गया है: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. 
-query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### ओवरलैपिंग फील्ड्स - -एक ग्राफक्यूएल चयन सेट को तभी वैध माना जाता है जब वह अंतिम परिणाम सेट को सही ढंग से हल करता है। - -यदि कोई विशिष्ट चयन सेट, या कोई फ़ील्ड, चयनित फ़ील्ड या उपयोग किए गए तर्कों द्वारा अस्पष्टता बनाता है, तो ग्राफ़कॉल सेवा ऑपरेशन को मान्य करने में विफल हो जाएगी। - -इस नियम का उल्लंघन करने वाली अमान्य कार्रवाइयों के कुछ उदाहरण यहां दिए गए हैं: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing 
fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -साथ ही, अधिक जटिल उपयोग-मामले में, आप दो टुकड़ों का उपयोग करके इस नियम का उल्लंघन कर सकते हैं जो अंततः अपेक्षित सेट में विरोध का कारण बन सकता है: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[एल्गोरिथ्म के बारे में आप यहां और पढ़ सकते हैं।](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### अप्रयुक्त चर या टुकड़े - -एक ग्राफक्यूएल ऑपरेशन को भी केवल तभी वैध माना जाता है जब सभी ऑपरेशन-परिभाषित घटकों (चर, टुकड़े) का उपयोग किया जाता है। - -इन नियमों का उल्लंघन करने वाले ग्राफ़िकल ऑपरेशंस के कुछ उदाहरण यहां दिए गए हैं: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. 
-query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -No translations matched your search -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -इसके अलावा, एक ग्राफक्यूएल फ़ील्ड चयन केवल तभी मान्य होता है जब निम्नलिखित मान्य हो: - -- ऑब्जेक्ट फ़ील्ड में चयन सेट निर्दिष्ट होना चाहिए। -- एक एज फ़ील्ड (स्केलर, एनम) में निर्दिष्ट चयन सेट नहीं होना चाहिए। - -निम्नलिखित स्कीमा के साथ इन नियमों के उल्लंघन के कुछ उदाहरण यहां दिए गए हैं: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -स्कीमा में परिभाषित मान के आधार पर तर्कों के लिए हार्ड-कोडेड मानों को पास करने वाले ग्राफ़िकल ऑपरेशंस को मान्य होना चाहिए। - -यहां इन नियमों का उल्लंघन करने वाली अमान्य कार्रवाइयों के कुछ उदाहरण दिए गए हैं: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) 
{ - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -यदि किसी अज्ञात प्रकार, चर, खंड या निर्देश का उपयोग किया जाता है, तो ग्राफकलाइन एपीआई एक त्रुटि उत्पन्न करेगा। - -उन अज्ञात संदर्भों को ठीक किया जाना चाहिए: - -- नाम बदलें अगर यह एक टाइपो था -- अन्यथा हटा दें - -### फ़्रैगमेंट: अमान्य फैलाव या परिभाषा - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -एक फ़्रैगमेंट को गैर-लागू प्रकार पर नहीं फैलाया जा सकता है। - -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -सभी फ़्रैगमेंट को परिभाषित किया जाना चाहिए (`on ...` का उपयोग करके) एक समग्र प्रकार, संक्षेप में: ऑब्जेक्ट, इंटरफ़ेस, या यूनियन। - -निम्नलिखित उदाहरण अमान्य हैं, क्योंकि स्केलर्स पर अंशों को परिभाषित करना अमान्य है। - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. 
- -यहाँ द ग्राफक्यूएल समर्थित निर्देशों के साथ एक उदाहरण दिया गया है: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -ग्राफ़ द्वारा समर्थित निर्देश प्रति स्थान केवल एक बार उपयोग किए जा सकते हैं। - -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/vi/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/hi/resources/subgraph-studio-faq.mdx similarity index 100% rename from website/src/pages/vi/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/hi/resources/subgraph-studio-faq.mdx diff --git a/website/src/pages/hi/subgraphs/_meta-titles.json b/website/src/pages/hi/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/hi/subgraphs/_meta-titles.json +++ b/website/src/pages/hi/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/hi/subgraphs/_meta.js b/website/src/pages/hi/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/hi/subgraphs/_meta.js +++ b/website/src/pages/hi/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/hi/subgraphs/best-practices/_meta.js b/website/src/pages/hi/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 
'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/hi/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/hi/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. 
For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/hi/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/hi/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/hi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/best-practices/pruning.mdx b/website/src/pages/hi/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <number>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <number>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <number>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/best-practices/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/best-practices/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/best-practices/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/best-practices/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/best-practices/timeseries.mdx b/website/src/pages/hi/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/hi/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval.
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/best-practices/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/best-practices/derivedfrom/) + +3.
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/_meta.js b/website/src/pages/hi/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/hi/subgraphs/cookbook/_meta.js +++ b/website/src/pages/hi/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index 5e86a234262a..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: सबग्राफ सर्वोत्तम प्रथा 4 - eth_calls से बचकर अनुक्रमण गति में सुधार करें ---- - -## TLDR - -eth_calls वे कॉल हैं जो एक Subgraph से Ethereum नोड पर किए जा सकते हैं। ये कॉल डेटा लौटाने में महत्वपूर्ण समय लेते हैं, जिससे indexing धीमी हो जाती है। यदि संभव हो, तो स्मार्ट कॉन्ट्रैक्ट्स को इस तरह से डिजाइन करें कि वे सभी आवश्यक डेटा उत्पन्न करें ताकि आपको eth_calls का उपयोग न करना पड़े। - -## Eth_calls से बचना एक सर्वोत्तम अभ्यास क्यों है - -Subgraphs को स्मार्ट कॉन्ट्रैक्ट्स से निकले हुए इवेंट डेटा को इंडेक्स करने के लिए ऑप्टिमाइज़ किया गया है। एक subgraph ‘eth_call’ से आने वाले डेटा को भी इंडेक्स कर सकता है, लेकिन इससे subgraph इंडेक्सिंग काफी धीमी हो सकती है क्योंकि ‘eth_calls’ के लिए स्मार्ट 
कॉन्ट्रैक्ट्स को एक्सटर्नल कॉल्स करने की आवश्यकता होती है। इन कॉल्स की प्रतिक्रिया subgraph पर निर्भर नहीं करती, बल्कि उस Ethereum नोड की कनेक्टिविटी और प्रतिक्रिया पर निर्भर करती है, जिसे क्वेरी किया जा रहा है। हमारे subgraphs में ‘eth_calls’ को कम करके या पूरी तरह से समाप्त करके, हम अपने इंडेक्सिंग स्पीड में उल्लेखनीय सुधार कर सकते हैं। - -### एक eth_call कैसा दिखता है? - -eth_calls अक्सर तब आवश्यक होते हैं जब subgraph के लिए आवश्यक डेटा इमिटेड इवेंट्स के माध्यम से उपलब्ध नहीं होता है। उदाहरण के लिए, एक ऐसा परिदृश्य मानें जहां एक subgraph को यह पहचानने की आवश्यकता है कि क्या ERC20 टोकन एक विशेष पूल का हिस्सा हैं, लेकिन कॉन्ट्रैक्ट केवल एक बुनियादी Transfer इवेंट इमिट करता है और वह इवेंट इमिट नहीं करता जिसमें हमें आवश्यक डेटा हो: - -```yaml -इवेंट Transfer(address indexed from, address indexed to, uint256 value); -``` - -मान लें कि टोकन्स की पूल सदस्यता एक state वेरिएबल getPoolInfo द्वारा निर्धारित होती है। इस मामले में, हमें इस डेटा को क्वेरी करने के लिए एक eth_call का उपयोग करना होगा: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // दिए गए पते पर ERC20 कॉन्ट्रैक्ट इंस्टेंस को बाइंड करें: - let instance = ERC20.bind(event.address) - - // eth_call के जरिए पूल जानकारी प्राप्त करें - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -यह कार्यात्मक है, हालांकि यह आदर्श नहीं है क्योंकि यह हमारे subgraph की indexing को धीमा कर देता है। - -## Eth_calls को कैसे समाप्त करें - -आदर्श रूप से, स्मार्ट कॉन्ट्रैक्ट को सभी आवश्यक डेटा को इवेंट्स के भीतर प्रकट करने के लिए अपडेट किया जाना 
चाहिए। उदाहरण के लिए, स्मार्ट कॉन्ट्रैक्ट में इवेंट में पूल जानकारी शामिल करने के लिए संशोधन करने से eth_calls की आवश्यकता को समाप्त किया जा सकता है: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -इस अपडेट के साथ, subgraph आवश्यक डेटा को बिना बाहरी कॉल के सीधे अनुक्रमित कर सकता है: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -यह बहुत अधिक प्रदर्शनकारी है क्योंकि इसने eth_calls की आवश्यकता को समाप्त कर दिया है। - -## Eth_calls को ऑप्टिमाइज़ करने का तरीका - -यदि स्मार्ट कॉन्ट्रैक्ट में परिवर्तन करना संभव नहीं है और eth_calls की आवश्यकता है, तो विभिन्न रणनीतियों को सीखने के लिए Improve Subgraph Indexing Performance Easily: Reduce eth_calls पढ़ें जो eth_calls को ऑप्टिमाइज़ करने के तरीके बताती है। - -## Eth_calls के रनटाइम ओवरहेड को कम करना - -जिन eth_calls को समाप्त नहीं किया जा सकता है, उनके द्वारा उत्पन्न रनटाइम ओवरहेड को मैनिफेस्ट में घोषित करके कम किया जा सकता है। जब graph-node एक ब्लॉक को प्रोसेस करता है, तो यह हैंडलर्स के चलने से पहले सभी घोषित eth_calls को समानांतर में प्रदर्शन करता है। जो कॉल घोषित नहीं हैं, वे हैंडलर्स के चलने के समय अनुक्रमिक रूप से निष्पादित होते हैं। रनटाइम में सुधार समानांतर में कॉल्स को करने से आता है, न कि अनुक्रमिक रूप से - यह कुल समय को कम करने में मदद करता है, लेकिन इसे पूरी तरह से समाप्त नहीं करता है। - -वर्तमान में, eth_calls को केवल event handlers के लिए घोषित किया जा सकता है। मैनिफेस्ट में, लिखें - -```yaml -इवेंट: 
TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -हैंडलर: handleTransferWithPool -कॉल्स: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -पीले रंग में हाइलाइट किया गया हिस्सा कॉल डिक्लेरेशन है। कॉल के पहले वाले हिस्से में जो टेक्स्ट लेबल है, वह केवल त्रुटि संदेशों के लिए उपयोग किया जाता है। कॉल के बाद वाला हिस्सा Contract[address].function(params) के रूप में होता है। address और params के लिए मान `event.address` और `event.params.` अनुमेय हैं। - -Handler स्वयं इस eth_call के परिणाम तक ठीक उसी तरह पहुंचता है जैसे पिछले अनुभाग में, अनुबंध से बाइंडिंग करके और कॉल करके। graph-node घोषित eth_calls के परिणामों को मेमोरी में कैश करता है और हैंडलर से कॉल इस मेमोरी कैश से परिणाम प्राप्त करेगा, बजाय इसके कि एक वास्तविक RPC कॉल की जाए। - -नोट: घोषित eth_calls केवल उन subgraphs में किए जा सकते हैं जिनका specVersion >= 1.2.0 है। - -## निष्कर्ष - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 8d20dc0e36fe..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph सर्वोत्तम प्रथा 2 - @derivedFrom का उपयोग करके अनुक्रमण और क्वेरी की प्रतिक्रियाशीलता में सुधार करें। ---- - -## संक्षेप में - -आपके स्कीमा में ऐरे हजारों प्रविष्टियों से बढ़ने पर एक सबग्राफ के प्रदर्शन को वास्तव में धीमा कर सकते हैं। यदि संभव हो, तो @derivedFrom निर्देशिका का उपयोग करना चाहिए जब आप ऐरे का उपयोग कर रहे हों, क्योंकि यह बड़े ऐरे के निर्माण को रोकता है, हैंडलरों को सरल बनाता है और व्यक्तिगत संस्थाओं के आकार को कम करता है, जिससे अनुक्रमण गति और प्रश्न प्रदर्शन में महत्वपूर्ण सुधार होता है। - -## @derivedFrom निर्देशिका का उपयोग कैसे करें - -आपको बस अपने स्कीमा में अपने एरे के बाद एक @derivedFrom निर्देशिका जोड़ने की आवश्यकता है। ऐसा: - -```graphql -टिप्पणियाँ : [Comment!]! @derivedFrom(field: “post”) -``` - -@derivedFrom कुशल एक से कई संबंध बनाता है, जिससे एक इकाई को संबंधित इकाई में एक फ़ील्ड के आधार पर कई संबंधित इकाइयों के साथ गतिशील रूप से संबंध बनाने की अनुमति मिलती है। यह दृष्टिकोण रिश्ते के दोनों पक्षों को डुप्लिकेट डेटा संग्रहीत करने की आवश्यकता को समाप्त करता है, जिससे subgraph अधिक कुशल बन जाता है। - -### @derivedFrom के लिए उदाहरण उपयोग मामला - -एक गतिशील रूप से बढ़ने वाले ऐरे का एक उदाहरण एक ब्लॉगिंग प्लेटफ़ॉर्म है जहाँ एक “Post” के कई “Comments” हो सकते हैं। - -आइए हम अपनी दो संस्थाओं, Post और Comment के साथ शुरू करते हैं। - -बिना ऑप्टिमाइजेशन के, आप इसे एक ऐरे के साथ इस प्रकार लागू कर सकते हैं: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! 
-} -``` - -इस तरह के ऐरे प्रभावी रूप से संबंध के पोस्ट पक्ष पर अतिरिक्त Comments डेटा को संग्रहीत करेंगे। - -यहाँ एक अनुकूलित संस्करण है जो @derivedFrom का उपयोग करता है: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -बस @derivedFrom निर्देश जोड़ने से, यह स्कीमा केवल संबंध के “Comments” पक्ष पर “Comments” को संग्रहीत करेगा और संबंध के “Post” पक्ष पर नहीं। ऐरे व्यक्तिगत पंक्तियों में संग्रहीत होते हैं, जिससे उन्हें काफी विस्तार करने की अनुमति मिलती है। यदि उनका विकास अनियंत्रित है, तो इससे विशेष रूप से बड़े आकार हो सकते हैं। - -यह न केवल हमारे subgraph को अधिक प्रभावी बनाएगा, बल्कि यह तीन विशेषताओं को भी अनलॉक करेगा: - -1. हम Post को क्वेरी कर सकते हैं और इसके सभी कमेंट्स देख सकते हैं। - -2. हम एक रिवर्स लुकअप कर सकते हैं और किसी भी Comment को क्वेरी कर सकते हैं और देख सकते हैं कि यह किस पोस्ट से आया है। - -3. हम [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) का उपयोग कर सकते हैं ताकि हमारे Subgraph मैपिंग में वर्चुअल संबंधों से डेटा को सीधे एक्सेस और संपादित करने की क्षमता को अनलॉक किया जा सके। - -## निष्कर्ष - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 2bf58c320368..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### अवलोकन - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. 
- - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. 
- -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. 
**Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. 
- -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## निष्कर्ष - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## अतिरिक्त संसाधन - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index 763e8ec760e6..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: सबग्राफ सर्वश्रेष्ठ प्रथा 3 - अपरिवर्तनीय संस्थाओं और बाइट्स को आईडी के रूप में उपयोग करके अनुक्रमण और क्वेरी प्रदर्शन में सुधार करें। ---- - -## TLDR - -हमारे schema.graphql फ़ाइल में अमूर्त एंटिटीज और आईडी के लिए बाइट्स का उपयोग सूचक गति और क्वेरी प्रदर्शन में [महत्वपूर्ण सुधार](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) करता है। - -## अमूर्त एंटिटीज - -एक एंटिटी को अमूर्त बनाने के लिए, हम बस एंटिटी में (immutable: true) जोड़ते हैं। - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! 
-} -``` - -Transfer एंटिटी को अमूर्त बनाने से, graph-node एंटिटी को अधिक कुशलता से संसाधित कर सकता है, जिससे indexing गति और क्वेरी की प्रतिक्रिया में सुधार होता है। - -Immutable Entities संरचनाएँ भविष्य में नहीं बदलेंगी। एक आदर्श एंटिटी जो अमूर्त एंटिटी बनेगी, वह एंटिटी होगी जो सीधे ऑन-चेन इवेंट डेटा को लॉग कर रही है, जैसे कि Transfer इवेंट को Transfer एंटिटी के रूप में लॉग किया जाना। - -### हुड के नीचे - -परिवर्तनीय एंटिटियों में एक ‘ब्लॉक रेंज’ होती है जो उनकी वैधता को इंगित करती है। इन एंटिटियों को अपडेट करने के लिए ग्राफ नोड को पिछले संस्करणों की ब्लॉक रेंज को समायोजित करना पड़ता है, जिससे डेटाबेस का कार्यभार बढ़ जाता है। क्वेरियों को भी केवल जीवित एंटिटियों को खोजने के लिए फ़िल्टर करने की आवश्यकता होती है। अमूर्त एंटिटियाँ तेज होती हैं क्योंकि ये सभी जीवित होती हैं और चूंकि ये नहीं बदलेंगी, लिखते समय किसी भी चेक या अपडेट की आवश्यकता नहीं होती, और क्वेरियों के दौरान कोई फ़िल्टरिंग की आवश्यकता नहीं होती। - -### अमूर्त एंटिटीज का उपयोग कब न करें - -अगर आपके पास एक ऐसा फ़ील्ड है जैसे status जिसे समय के साथ संशोधित करने की आवश्यकता है, तो आपको एंटिटी को अमूर्त नहीं बनाना चाहिए। अन्यथा, आपको जब भी संभव हो, अमूर्त एंटिटीज का उपयोग करना चाहिए। - -## Bytes को IDs के रूप में - -हर एंटिटी के लिए एक ID की आवश्यकता होती है। पिछले उदाहरण में, हम देख सकते हैं कि ID पहले से ही Bytes प्रकार की है। - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -हालांकि IDs के लिए अन्य प्रकार संभव हैं, जैसे String और Int8, लेकिन सभी IDs के लिए Bytes प्रकार का उपयोग करने की सिफारिश की जाती है क्योंकि चरित्र स्ट्रिंग्स को बाइनरी डेटा संग्रहीत करने के लिए बाइट स्ट्रिंग्स की तुलना में दोगुना स्थान चाहिए, और UTF-8 चरित्र स्ट्रिंग्स की तुलना करते समय स्थानीय भाषा का ध्यान रखना आवश्यक है, जो बाइट स्ट्रिंग्स की तुलना के लिए उपयोग की जाने वाली बाइटवाइज तुलना की तुलना में बहुत अधिक महंगा है। - -### IDs के रूप में Bytes का उपयोग न करने के कारण - -1. 
यदि एंटिटी IDs मानव-पठनीय होने चाहिए, जैसे कि ऑटो-इंक्रीमेंटेड न्यूमेरिकल IDs या पठनीय स्ट्रिंग्स, तो IDs के लिए Bytes का उपयोग नहीं किया जाना चाहिए। -2. यदि किसी subgraph के डेटा को दूसरे डेटा मॉडल के साथ एकीकृत किया जा रहा है जो IDs के रूप में Bytes का उपयोग नहीं करता है, तो Bytes के रूप में IDs का उपयोग नहीं किया जाना चाहिए। -3. Indexing और क्वेरीिंग प्रदर्शन में सुधार की आवश्यकता नहीं है। - -### Bytes के रूप में IDs के साथ जोड़ना - -बहुत से subgraphs में एक ID में दो प्रॉपर्टीज को जोड़ने के लिए स्ट्रिंग संयोजन का उपयोग करना एक सामान्य प्रथा है, जैसे कि event.transaction.hash.toHex() + "-" + event.logIndex.toString() का उपयोग करना। हालांकि, चूंकि यह एक स्ट्रिंग लौटाता है, यह subgraph इंडेक्सिंग और क्वेरी प्रदर्शन में महत्वपूर्ण रूप से बाधा डालता है। - -इसके बजाय, हमें event properties को जोड़ने के लिए concatI32() method का उपयोग करना चाहिए। यह रणनीति एक Bytes ID उत्पन्न करती है जो बहुत अधिक performant होती है। - -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Bytes के रूप में IDs के साथ क्रमबद्ध करना - -Bytes को IDs के रूप में उपयोग करके क्रमबद्ध करना इस उदाहरण क्वेरी और प्रतिक्रिया में देखे गए अनुसार उपयुक्त नहीं है। - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -प्रश्न प्रतिक्रिया: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -IDs,hex के 
रूप में वापस किए जाते हैं। - -क्रमबद्धता में सुधार करने के लिए, हमें इकाई पर एक और फ़ील्ड बनानी चाहिए जो एक BigInt हो। - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -इससे क्रमबद्धता को क्रमिक रूप से अनुकूलित करने की अनुमति मिलेगी। - -Query - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -प्रश्न प्रतिक्रिया: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## निष्कर्ष - -Immutable Entities और Bytes को IDs के रूप में उपयोग करने से subgraph की दक्षता में उल्लेखनीय सुधार हुआ है। विशेष रूप से, परीक्षणों ने क्वेरी प्रदर्शन में 28% तक की वृद्धि और indexing स्पीड में 48% तक की तेजी को उजागर किया है। - -इस ब्लॉग पोस्ट में, Edge & Node के सॉफ़्टवेयर इंजीनियर डेविड लुटरकोर्ट द्वारा Immutable Entities और Bytes को IDs के रूप में उपयोग करने के बारे में और अधिक पढ़ें: [दो सरल Subgraph प्रदर्शन सुधार।](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/pruning.mdx b/website/src/pages/hi/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index f2ddfe9da791..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: सबग्राफ बेस्ट प्रैक्टिस 1 - सबग्राफ प्रूनिंग के साथ क्वेरी की गति में सुधार करें ---- - -## TLDR - -Pruning(/developing/creating-a-subgraph/#prune), subgraph के डेटाबेस से दिए गए ब्लॉक तक की archival entities को हटाता है, और unused entities को subgraph के डेटाबेस से हटाने से subgraph की query performance में सुधार होगा, अक्सर काफी हद तक। indexerHints का उपयोग करना subgraph को prune करने का एक आसान तरीका है। - -## IndexerHints के साथ subgraph को prune करने का तरीका - -Manifest में एक section को indexerHints के नाम से जोड़ें। - -indexerHints में तीन prune विकल्प होते हैं: - -- prune: auto: आवश्यक न्यूनतम इतिहास को बनाए रखता है जैसा कि Indexer द्वारा निर्धारित किया गया है, जो क्वेरी प्रदर्शन को अनुकूलित करता है। यह सामान्यतः अनुशंसित सेटिंग है और यह सभी subgraphs के लिए डिफ़ॉल्ट है जो graph-cli >= 0.66.0 द्वारा बनाए गए हैं। -- `prune: `: ऐतिहासिक ब्लॉकों को बनाए रखने की संख्या पर एक कस्टम सीमा निर्धारित करता है। -- `prune: never`: ऐतिहासिक डेटा का कोई छंटाई नहीं; पूरी इतिहास को बनाए रखता है और यह डिफ़ॉल्ट है यदि indexerHints अनुभाग नहीं है। `prune: never` को तब चुना जाना चाहिए यदि Time Travel Queries (/subgraphs/querying/graphql-api/#time-travel-queries) की आवश्यकता हो। - -हम अपने 'subgraph' में indexerHints जोड़ सकते हैं हमारे subgraph.yaml को अपडेट करके: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## महत्वपूर्ण विचार - -- यदि Time Travel Queries(/subgraphs/querying/graphql-api/#time-travel-queries) की आवश्यकता हो और प्रूनिंग भी की जाए, तो Time Travel Query कार्यक्षमता बनाए रखने 
के लिए प्रूनिंग को सही ढंग से किया जाना चाहिए। इसलिए, Time Travel Queries के साथ indexerHints: prune: auto का उपयोग करना सामान्यतः अनुशंसित नहीं है। इसके बजाय, उस ब्लॉक ऊंचाई तक सही ढंग से प्रून करने के लिए `indexerHints: prune: ` का उपयोग करें जो Time Travel Queries के लिए आवश्यक ऐतिहासिक डेटा को बनाए रखता है, या prune: never का उपयोग करें ताकि सभी डेटा बनाए रखा जा सके। - -- यह 'grafting' (/subgraphs/cookbook/grafting/) को उस ब्लॉक ऊँचाई पर करना संभव नहीं है जो छंटनी (pruned) की गई है। यदि 'grafting' नियमित रूप से किया जाता है और छंटनी (pruning) की आवश्यकता है, तो यह अनुशंसा की जाती है कि `indexerHints: prune: ` का उपयोग किया जाए जो सटीक रूप से एक निश्चित संख्या में ब्लॉकों (जैसे, छह महीने के लिए पर्याप्त) को बनाए रखेगा। - -## निष्कर्ष - -Pruning का उपयोग indexerHints से करना एक सर्वोत्तम प्रथा है subgraph विकास के लिए, जो महत्वपूर्ण क्वेरी प्रदर्शन सुधार प्रदान करता है। - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx b/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 1e4dfb156048..000000000000 --- a/website/src/pages/hi/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## अवलोकन - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### महत्वपूर्ण विचार - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. 
-- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -उदाहरण: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -उदाहरण: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. - -उदाहरण: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. 
They enable aggregations based on specific criteria, such as a token in a financial application. - -उदाहरण: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. 
-- Current Data: An optional current argument can include the current, partially filled interval. - -### निष्कर्ष - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/hi/subgraphs/developing/deploying/_meta.js b/website/src/pages/hi/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/hi/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/hi/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 9145a176e333..000000000000 --- a/website/src/pages/hi/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: सबग्राफ स्टूडियो अक्सर पूछे जाने वाले प्रश्न ---- - -## 1. सबग्राफ स्टूडियो क्या है? - -[सबग्राफ स्टूडियो](https://thegraph.com/studio/) सबग्राफ और एपीआई key बनाने, प्रबंधित करने और प्रकाशित करने के लिए एक डैप है। - -## 2. मैं एक एपीआई कुंजी कैसे बना सकता हूँ? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. क्या मैं कई एपीआई कुंजियां बना सकता हूं? - -हाँ! आप विभिन्न परियोजनाओं में उपयोग करने के लिए कई एपीआई keys बना सकते हैं। [यहां](https://thegraph.com/studio/apikeys/) लिंक देखें। - -## 4. मैं एपीआई कुंजी के लिए डोमेन को कैसे प्रतिबंधित करूं? - -एपीआई key बनाने के बाद, सुरक्षा अनुभाग में, आप उन डोमेन को परिभाषित कर सकते हैं जो किसी विशिष्ट एपीआई key को क्वेरी कर सकते हैं। - -## 5. क्या मैं अपना सबग्राफ किसी अन्य स्वामी को स्थानांतरित कर सकता हूं? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. 
You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -ध्यान दें कि एक बार स्थानांतरित हो जाने के बाद आप स्टूडियो में सबग्राफ को देख या संपादित नहीं कर पाएंगे। - -## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -याद रखें कि आप एक एपीआई key बना सकते हैं और नेटवर्क पर प्रकाशित किसी सबग्राफ को क्वेरी कर सकते हैं, भले ही आप स्वयं एक सबग्राफ बनाते हों। नई एपीआई key के माध्यम से ये प्रश्न, नेटवर्क पर किसी अन्य के रूप में भुगतान किए गए प्रश्न हैं। diff --git a/website/src/pages/hi/subgraphs/developing/publishing/_meta.js b/website/src/pages/hi/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/hi/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/hi/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/hi/subgraphs/querying/_meta.js b/website/src/pages/hi/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/hi/subgraphs/querying/_meta.js +++ b/website/src/pages/hi/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git 
a/website/src/pages/it/resources/_meta-titles.json b/website/src/pages/it/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/it/resources/_meta-titles.json +++ b/website/src/pages/it/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/it/resources/_meta.js b/website/src/pages/it/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/it/resources/_meta.js +++ b/website/src/pages/it/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/it/resources/release-notes/_meta.js b/website/src/pages/it/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/it/resources/release-notes/_meta.js rename to website/src/pages/it/resources/migration-guides/_meta.js diff --git a/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/it/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 99% rename from website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/it/resources/migration-guides/assemblyscript-migration-guide.mdx index fb1ad8beb382..85f6903a6c69 100644 --- a/website/src/pages/de/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/it/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -256,7 +256,7 @@ let something: string | null = 'data' let somethingOrElse = something ? 
something : 'else' -// oder +// or let somethingOrElse @@ -432,7 +432,7 @@ export class Something { constructor(public value: Thing) {} } -// oder +// or export class Something { value: Thing @@ -442,7 +442,7 @@ export class Something { } } -// oder +// or export class Something { value!: Thing diff --git a/website/src/pages/it/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/it/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/it/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. 
+ +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). 
+ +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. + +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. 
+query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) { + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
+ +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. +query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. 
+query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. +- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) 
{ + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. + +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. 
+ +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index b6bd7ecc38d2..000000000000 --- a/website/src/pages/it/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Guida alla migrazione di AssemblyScript ---- - -Finora i subgraph utilizzavano una delle [prime versioni di AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finalmente abbiamo aggiunto il supporto per la [più recente disponibile](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 - -Ciò consentirà agli sviluppatori di subgraph di utilizzare le nuove caratteristiche del linguaggio AS e della libreria standard. - -Questa guida si applica a chiunque utilizzi `graph-cli`/ `graph-ts` al di sotto della versione `0.22.0`. Se siete già a una versione superiore (o uguale) a questa, avete già utilizzato la versione `0.19.10` di AssemblyScript 🙂 - -> Nota: A partire da `0.24.0`, `graph-node` può supportare entrambe le versioni, a seconda della `apiVersion` specificata nel manifest del subgraph. 
- -## Caratteristiche - -### Nuova funzionalità - -- `TypedArray` possono ora essere costruiti da `ArrayBuffer` utilizzando il [nuovo metodo statico `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nuove funzioni di libreria standard: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` e `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aggiunto il supporto per x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Aggiunto `StaticArray`, una variante di array più efficiente ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Aggiunto `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementato l'argomento `radix` su `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Aggiunto il supporto per i separatori nei letterali in virgola mobile ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Aggiunto il supporto per le funzioni di prima classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Aggiunti i builtin: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementati `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Aggiunto il supporto per le stringhe letterali dei template ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Aggiunto `encodeURI(Component)` e `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Aggiunto `toString`, `toDateString` e `toTimeString` a `Date` 
([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Aggiunto `toUTCString` per `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Aggiunto il tipo builtin `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Ottimizzazioni - -- Le funzioni `matematiche` come `exp`, `exp2`, `log`, `log2` e `pow` sono state sostituite da varianti più rapide ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Ottimizzato leggermente `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache di più accessi ai campi in std Map e Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Ottimizzato per le potenze di due in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Altro - -- Il tipo di un letterale di array può ora essere dedotto dal suo contenuto ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Aggiornato stdlib a Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Come aggiornare? - -1. Modificare le mappature `apiVersion` in `subgraph.yaml` a `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. Aggiornare il `graph-cli` in uso alla versione `ultima` eseguendo: - -```bash -# se è installato globalmente -npm install --global @graphprotocol/graph-cli@latest - -# o nel proprio subgraph, se è una dipendenza di dev -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Fare lo stesso per `graph-ts`, ma invece di installarlo globalmente, salvarlo nelle dipendenze principali: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Seguire il resto della guida per correggere le modifiche alla lingua. -5. 
Eseguire di nuovo `codegen` e `deploy`. - -## Cambiamenti di rottura - -### Nullabilità - -Nella versione precedente di AssemblyScript, era possibile creare codice come questo: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Tuttavia, nella versione più recente, poiché il valore è nullable, è necessario effettuare un controllo, come questo: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -Oppure forzarla in questo modo: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -Se non si è sicuri di quale scegliere, si consiglia di utilizzare sempre la versione sicura. Se il valore non esiste, si potrebbe fare una dichiarazione if anticipata con un ritorno nel gestore del subgraph. - -### Shadowing della variabile - -Prima si poteva fare lo [ shadowing della variabile](https://en.wikipedia.org/wiki/Variable_shadowing) e il codice come questo funzionava: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Tuttavia ora questo non è più possibile e il compilatore restituisce questo errore: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -È necessario rinominare le variabili duplicate se si dispone di un'ombreggiatura delle variabili. - -### Confronti nulli - -Eseguendo l'aggiornamento sul subgraph, a volte si possono ottenere errori come questi: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
- if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Per risolvere il problema è sufficiente modificare l'istruzione `if` in qualcosa di simile: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Lo stesso vale se si fa != invece di ==. - -### Casting - -Il modo più comune per effettuare il casting era quello di utilizzare la parola chiave `as`, come in questo caso: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Tuttavia, questo funziona solo in due scenari: - -- Casting primitivo (tra tipi come `u8`, `i32`, `bool`; ad esempio: `let b: isize = 10; b as usize`); -- Upcasting sull'ereditarietà delle classi (subclasse → superclasse) - -Esempi: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -Ci sono due scenari in cui potreste voler effettuare casting, ma l'uso `as`/`var` **non è sicuro**: - -- Downcasting sull'ereditarietà delle classi (superclasse → subclasse) -- Tra due tipi che condividono una superclasse - -```typescript -// downcasting sull'ereditarietà delle classi -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // si interrompe in fase di esecuzione :( -``` - -```typescript -// tra due tipi che condividono una superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // si interrompe in fase di esecuzione :( -``` - -In questi casi, si può usare la funzione `changetype`: - -```typescript -// downcasting sull'ereditarietà delle classi -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // funziona :) -``` - -```typescript -// tra due tipi che condividono 
una superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // funziona :) -``` - -Se si vuole solo rimuovere la nullità, si può continuare a usare l'operatore `as` (oppure `variabile`), ma assicurarsi di sapere che il valore non può essere nullo, altrimenti si interromperà. - -```typescript -// rimuovere la nullità -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Per il caso della nullità si consiglia di dare un'occhiata alla [funzione verifica della nullità](https://www.assemblyscript.org/basics.html#nullability-checks), che renderà il codice più pulito 🙂 - -Inoltre abbiamo aggiunto alcuni metodi statici in alcuni tipi per facilitare il casting, che sono: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Verifica della nullità con accesso alle proprietà - -Per utilizzare la [di funzione controllo della nullità](https://www.assemblyscript.org/basics.html#nullability-checks) si possono usare le istruzioni `if` oppure l'operatore ternario (`?` e `:`) come questo: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Tuttavia, questo funziona solo quando si esegue il `if` / ternario su una variabile, non sull'accesso a una proprietà, come in questo caso: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? 
container.data : 'else' // non viene compilato -``` - -Che produce questo errore: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Per risolvere questo problema, si può creare una variabile per l'accesso alla proprietà, in modo che il compilatore possa fare la magia del controllo di annullabilità: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // viene compilato benissimo :) -``` - -### Sovraccarico dell'operatore con accesso alle proprietà - -Se si tenta di sommare (ad esempio) un tipo nullable (da un accesso a una proprietà) con uno non nullable, il compilatore di AssemblyScript, invece di dare un avviso di errore in fase di compilazione sul fatto che uno dei valori è nullable, si limita a compilare in silenzio, dando la possibilità che il codice si interrompa in fase di esecuzione. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // non dà errori in fase di compilazione come dovrebbe -``` - -Abbiamo aperto un problema sul compilatore AssemblyScript per questo, ma per il momento se fate questo tipo di operazioni nelle vostre mappature di subgraph, dovreste modificarle in modo da fare un controllo di null prima di esse. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // ora `n` è garantito essere un BigInt -``` - -### Inizializzazione del valore - -Se si dispone di codice come questo: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Verrà compilato ma si interromperà in fase di esecuzione, perché il valore non è stato inizializzato, quindi assicuratevi che il vostro subgraph abbia inizializzato i suoi valori, in questo modo: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -Inoltre, se si hanno proprietà nullable in un'entità GraphQL, come questa: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -E avete un codice simile a questo: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -È necessario assicurarsi di inizializzare il valore `total.amount`, perché se si tenta di accedervi come nell'ultima riga per la somma, il programma si blocca. Quindi bisogna inizializzarlo prima: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Oppure si può semplicemente modificare lo schema GraphQL per non utilizzare un tipo nullable per questa proprietà, quindi la inizializzeremo come zero nel passaggio `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // inizializza già le proprietà non nulle -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Inizializzazione delle proprietà della classe - -Se si esportano classi con proprietà che sono altre classi (dichiarate dall'utente o dalla libreria standard) come questa: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Il compilatore darà un errore perché è necessario aggiungere un initializer per le proprietà che sono classi, oppure aggiungere l'operatore `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Inizializzazione del Array - -La classe `Array` accetta ancora un numero per inizializzare la lunghezza dell'elenco, ma bisogna fare attenzione perché operazioni come `.push` aumentano effettivamente la dimensione invece di aggiungere all'inizio, ad esempio: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -A seconda dei tipi utilizzati, ad esempio quelli nullable, e del modo in cui vi si accede, si potrebbe verificare un errore di runtime come questo: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Per spingere effettivamente all'inizio, si dovrebbe inizializzare l'`Array` con dimensione zero, in questo modo: - -```typescript -let arr = 
new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Oppure si dovrebbe mutare tramite indice: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### Schema GraphQL - -Non si tratta di una modifica diretta di AssemblyScript, ma potrebbe essere necessario aggiornare il file `schema.graphql`. - -Ora non è più possibile definire campi nei tipi che sono elenchi non nulli. Se si ha uno schema come questo: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Si dovrà aggiungere un `!` al membro del tipo List, in questo modo: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -Questo è cambiato a causa delle differenze di nullabilità tra le versioni di AssemblyScript ed è legato al file `src/generated/schema.ts` (percorso predefinito, potrebbe essere stato modificato). - -### Altro - -- Allinea `Map#set` e `Set#add` con le specifiche, restituendo `questo` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Gli Array non ereditano più da ArrayBufferView, ma sono ora distinti ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Le classi inizializzate a partire da letterali di oggetti non possono più definire un costruttore ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Il risultato di un'operazione binaria `**` è ora l'intero a denominatore comune se entrambi gli operandi sono interi. 
In precedenza, il risultato era un float, come se si chiamasse `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerenzia `NaN` a `false` quando viene lanciato a `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Quando si sposta un piccolo valore intero di tipo `i8`/`u8` oppure `i16`/`u16`, solo i 3 o 4 bit meno significativi del valore RHS influiscono sul risultato, analogamente al risultato di un `i32.shl` che viene influenzato solo dai 5 bit meno significativi del valore RHS. Esempio: `someI8 << 8` prima produceva il valore `0`, ma ora produce `someI8` a causa del mascheramento del RHS come `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Correzione del bug dei confronti tra stringhe relazionali quando le dimensioni sono diverse ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/it/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/it/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 067bf445e437..000000000000 --- a/website/src/pages/it/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: Guida alla migrazione delle validazione GraphQL ---- - -Presto `graph-node` supporterà il 100% di copertura delle specifiche [Specifiche delle validation GraphQL] (https://spec.graphql.org/June2018/#sec-Validation). - -Le versioni precedenti di `graph-node` non supportavano tutte le validation e fornivano risposte più aggraziate, per cui, in caso di ambiguità, `graph-node` ignorava i componenti delle operazioni GraphQL non validi. - -Il supporto delle validation GraphQL è il pilastro delle nuove funzionalità e delle prestazioni su scala di The Graph Network. 
- -Garantirà inoltre il determinismo delle risposte alle query, un requisito fondamentale per The Graph Network. - -**L'abilitazione delle validation GraphQL interromperà alcune query esistenti** inviate all'API The Graph. - -Per essere conformi a tali validation, seguire la guida alla migrazione. - -> ⚠️ Se non si migrano le query prima dell'introduzione delle validation, queste restituiranno errori e potrebbero interrompere i frontend/client. - -## Guida alla migrazione - -È possibile utilizzare lo strumento di migrazione CLI per trovare eventuali problemi nelle operazioni GraphQL e risolverli. In alternativa, è possibile aggiornare l'endpoint del client GraphQL per utilizzare l'endpoint `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Testare le query con questo endpoint vi aiuterà a trovare i problemi nelle vostre query. - -> Non è necessario migrare tutti i subgraph; se si utilizza [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) o [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), questi garantiscono già la validità delle query. - -## Strumento CLI di migrazione - -**La maggior parte degli errori delle operazioni GraphQL può essere individuata in anticipo nella propria codebase.** - -Per questo motivo, forniamo un'esperienza agevole per la validating delle operazioni GraphQL durante lo sviluppo o in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) è un semplice strumento CLI che aiuta a validare le operazioni GraphQL rispetto a un determinato schema. - -### **Per cominciare** - -Lo strumento può essere eseguito nel modo seguente: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Note:** - -- Impostare o sostituire $GITHUB_USER, $SUBGRAPH_NAME con i valori appropriati. 
Ad esempio: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- L'URL dello schema di anteprima (https://api-next.thegraph.com/) fornito è fortemente limitato e verrà abbandonato una volta che tutti gli utenti saranno migrati alla nuova versione. **Non usarlo in produzione.** -- Le operazioni sono identificate in file con le seguenti estensioni [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (opzione `-o`). - -### Output CLI - -Lo strumento `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI mostrerà gli errori delle operazioni GraphQL come segue: - -![Output di errore da CLI](https://i.imgur.com/x1cBdhq.png) - -Per ogni errore sono disponibili una descrizione, il percorso e la posizione del file e un link a un esempio di soluzione (vedere la sezione seguente). - -## Eseguire le query locali sullo schema di anteprima - -Forniamo un endpoint `https://api-next.thegraph.com/` che esegue una versione di `graph-node` con le validation attivate. - -È possibile provare le query inviandole a: - -- `https://api-next.thegraph.com/subgraphs/id/` - -oppure - -- `https://api-next.thegraph.com/subgraphs/name//` - -Per lavorare sulle query che sono state contrassegnate da errori di validazione, si può usare il proprio strumento preferito di query GraphQL, come Altair o [GraphiQL](https://cloud.hasura.io/public/graphiql), e provare la query. Questi strumenti segnaleranno gli errori nella loro interfaccia utente, anche prima dell'esecuzione. - -## Come risolvere i problemi - -Di seguito sono riportati tutti gli errori di validation GraphQL che potrebbero verificarsi nelle operazioni GraphQL esistenti. 
- -### Le variabili, le operazioni, i frammenti o gli argomenti di GraphQL devono essere unici - -Abbiamo applicato regole per garantire che un'operazione includa un set unico di variabili GraphQL, operazioni, frammenti e argomenti. - -Un'operazione GraphQL è valida solo se non contiene alcuna ambiguità. - -A tal fine, è necessario garantire che alcuni componenti dell'operazione GraphQL siano unici. - -Ecco un esempio di alcune operazioni non valide che violano queste regole: - -**Nome della query duplicato (#UniqueOperationNamesRule)** - -```graphql -# La seguente operazione ha violato la regola UniqueOperationName -# la regola UniqueOperationName poiché abbiamo una singola operazione con 2 query -# con lo stesso nome -query myData { - id -} - -query myData { - name -} -``` - -_Soluzione:_ - -```graphql -query myData { - id -} - -query myData2 { - # rinominare la seconda query - name -} -``` - -**Nome del frammento duplicato (#UniqueFragmentNamesRule)** - -```graphql -# L'operazione seguente ha violato -# la regola UniqueFragmentName. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Soluzione:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assegna un nome unico al frammento - metadati -} - -fragment MyFieldsName { # assegna un nome unico al frammento - nome -} -``` - -**Nome variabile duplicato (#UniqueVariableNamesRule)** - -```graphql -# L'operazione seguente viola la norma UniqueVariables. -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Soluzione:_ - -```graphql -query myData($id: String) { - # mantiene la variabile rilevante (qui: `$id: String`) - id - ...MyFields -} -``` - -**Nome dell'argomento duplicato (#UniqueArgument)** - -```graphql -# L'operazione seguente ha violato la norma UniqueArguments -query myData($id: ID!) 
{ - userById(id: $id, id: "1") { - id - } -} -``` - -_Soluzione:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicazione della query anonima (#LoneAnonymousOperationRule)** - -Inoltre, l'uso di due operazioni anonime violerà la norma `LoneAnonymousOperation`, a causa di un conflitto nella struttura della risposta: - -```graphql -# Questa operazione fallirà se eseguita insieme in -# un'unica operazione con le due query seguenti: -query { - someField -} - -query { - otherField -} -``` - -_Soluzione:_ - -```graphql -query { - someField - otherField -} -``` - -Oppure dare un nome alle due query: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Campi sovrapposti - -Un set di selezione GraphQL è considerato valido solo se risolve correttamente l'eventuale set di risultati. - -Se uno specifico set di selezione, o un campo, crea ambiguità sia per il campo selezionato che per gli argomenti utilizzati, il servizio GraphQL non riuscirà a validare l'operazione. - -Ecco alcuni esempi di operazioni non valide che violano questa regola: - -**Alias di campi in conflitto (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# L'aliasing dei campi potrebbe causare conflitti, sia con -# altri alias o con altri campi che esistono nello -# schema GraphQL. -query { - dogs { - name: nickname - name - } -} -``` - -_Soluzione:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Campi in conflitto con gli argomenti (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Argomenti diversi possono portare a dati diversi, -# quindi non possiamo presumere che i campi siano gli stessi. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Soluzione:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Inoltre, in casi d'uso più complessi, si potrebbe violare questa regola utilizzando due frammenti che potrebbero causare un conflitto nell'insieme atteso: - -```graphql -query { - # Alla fine, abbiamo due definizioni di "x", che puntano - # a campi diversi! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Inoltre, le direttive GraphQL lato client come `@skip` e `@include` potrebbero portare ad ambiguità, ad esempio: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Per saperne di più sull'algoritmo, consultare qui.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Variabili o frammenti non utilizzati - -Un'operazione GraphQL è considerata valida solo se vengono utilizzati tutti i componenti definiti dall'operazione (variabili, frammenti). - -Ecco alcuni esempi di operazioni GraphQL che violano queste regole: - -**Variabile inutilizzata** (#NoUnusedVariablesRule) - -```graphql -# Non valido, perché $someVar non viene mai utilizzato. -query something($someVar: String) { - someData -} -``` - -_Soluzione:_ - -```graphql -query something { - someData -} -``` - -**Frammento non utilizzato** (#NoUnusedFragmentsRule) - -```graphql -# Non valido, perché il frammento AllFields non viene mai utilizzato. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Soluzione:_ - -```graphql -# Non valido, perché il frammento AllFields non viene mai utilizzato. 
-query something { - someData -} - -# rimuovere il frammento `AllFields` -``` - -### Set di selezione non valido o mancante (#ScalarLeafsRule) - -Inoltre, la selezione di un campo GraphQL è valida solo se il seguente è validato: - -- Un campo oggetto deve avere un set di selezione specificato. -- Un campo bordo (scalare, enum) non deve avere un set di selezione specificato. - -Ecco alcuni esempi di violazione di queste regole con il seguente schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Set di selezione non valido** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Soluzione:_ - -```graphql -query { - user { - id - } -} -``` - -**Set di selezione mancante** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Soluzione:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Valori degli argomenti non corretti (#VariablesInAllowedPositionRule) - -Le operazioni GraphQL che passano valori codificati agli argomenti devono essere validi, in base al valore definito nello schema. - -Ecco alcuni esempi di operazioni non valide che violano queste regole: - -```graphql -scopi della query { - # Se "name" è definito come "String" nello schema, - # questa query fallirà durante la validazione. - purpose(name: 1) { - id - } -} - -# Questo può accadere anche quando viene definita una variabile non corretta: - -scopi della query ($name: Int!) { - # Se "name" è definito come `String` nello schema, - # questa query fallirà durante la validazione, perché la - # variabile utilizzata è di tipo `Int`. - purpose(name: $name) { - id - } -} -``` - -### Tipo, variabile, frammento o direttiva sconosciuti (#UnknownX) - -L'API GraphQL solleverà un errore se viene utilizzato un tipo, una variabile, un frammento o una direttiva sconosciuti. 
- -Questi riferimenti sconosciuti devono essere corretti: - -- rinominare se si tratta di un errore di battitura -- altrimenti, rimuovere - -### Frammento: diffusione o definizione non valida - -**Diffusione del frammento non valida (#PossibleFragmentSpreadsRule)** - -Un frammento non può essere distribuito su un tipo non applicabile. - -Ad esempio, non si può applicare un frammento `Cat` al tipo `Dog`: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Definizione di frammento non valida (#FragmentsOnCompositeTypesRule)** - -Tutti i frammenti devono essere definiti su (usando `on ...`) un tipo composito, in breve: oggetto, interfaccia o unione. - -Gli esempi seguenti non sono validi, poiché la definizione di frammenti su scalari non è valida. - -```graphql -fragment fragOnScalar on Int { - # non possiamo definire un frammento su uno scalare (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` non è un subtipo di `Dog` - somethingElse - } -} -``` - -### Utilizzo delle direttive - -**La direttiva non può essere utilizzata in questa sede (#KnownDirectivesRule)** - -È possibile utilizzare solo le direttive GraphQL (`@...`) supportate da Graph API. - -Ecco un esempio con le direttive supportate da GraphQL: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Nota: `@stream`, `@live`, `@defer` non sono supportati._ - -**La direttiva può essere utilizzata una sola volta in questo luogo (#UniqueDirectivesPerLocationRule)** - -Le direttive supportate da The Graph possono essere utilizzate una sola volta per ogni posizione. 
- -Il seguente non è valido (e ridondante): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/ar/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/it/resources/subgraph-studio-faq.mdx similarity index 78% rename from website/src/pages/ar/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/it/resources/subgraph-studio-faq.mdx index 74c0228e4093..8761f7a31bf6 100644 --- a/website/src/pages/ar/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/it/resources/subgraph-studio-faq.mdx @@ -1,5 +1,5 @@ --- -title: الأسئلة الشائعة حول الفرعيةرسم بياني استوديو +title: Subgraph Studio FAQs --- ## 1. What is Subgraph Studio? @@ -28,4 +28,4 @@ Note that you will no longer be able to see or edit the subgraph in Studio once You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. -تذكر أنه يمكنك إنشاء API key والاستعلام عن أي subgraph منشور على الشبكة ، حتى إذا قمت ببناء subgraph بنفسك. حيث أن الاستعلامات عبر API key الجديد ، هي استعلامات مدفوعة مثل أي استعلامات أخرى على الشبكة. +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. 
diff --git a/website/src/pages/it/subgraphs/_meta-titles.json b/website/src/pages/it/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/it/subgraphs/_meta-titles.json +++ b/website/src/pages/it/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/it/subgraphs/_meta.js b/website/src/pages/it/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/it/subgraphs/_meta.js +++ b/website/src/pages/it/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/it/subgraphs/best-practices/_meta.js b/website/src/pages/it/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/it/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/it/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/it/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/it/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/it/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/best-practices/pruning.mdx b/website/src/pages/it/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of blocks to retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of blocks to retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of blocks to retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/best-practices/timeseries.mdx b/website/src/pages/it/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/it/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD * amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, *, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/_meta.js b/website/src/pages/it/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/it/subgraphs/cookbook/_meta.js +++ b/website/src/pages/it/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 53c1bae83194..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Panoramica - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Additional Resources - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/pruning.mdx b/website/src/pages/it/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/cookbook/timeseries.mdx b/website/src/pages/it/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index eeb246a2b3d0..000000000000 --- a/website/src/pages/it/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Panoramica - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/it/subgraphs/developing/deploying/_meta.js b/website/src/pages/it/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/it/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/it/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index cb695832e258..000000000000 --- a/website/src/pages/it/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: FAQ di Subgraph Studio ---- - -## 1. Che cos'è Subgraph Studio? - -[Subgraph Studio](https://thegraph.com/studio/) è una dapp per creare, gestire e pubblicare subgraph e chiavi API. - -## 2. Come si crea una chiave API? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. Posso creare più chiavi API? - -Sì, è possibile creare più chiavi API da utilizzare in diversi progetti. Scoprire sul link [qui](https://thegraph.com/studio/apikeys/). - -## 4. Come si limita un dominio per una chiave API? 
- -Dopo aver creato una chiave API, nella sezione Sicurezza è possibile definire i domini che possono eseguire query di una specifica chiave API. - -## 5. Posso trasferire il mio subgraph a un altro proprietario? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -Si noti che non sarà più possibile vedere o modificare il subgraph nel Studio una volta trasferito. - -## 6. Come posso trovare gli URL di query per i subgraph se non sono lo sviluppatore del subgraph che voglio usare? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -Si ricorda che è possibile creare una chiave API ed eseguire query del qualsiasi subgraph pubblicato sulla rete, anche se si costruisce un subgraph da soli. Queste query tramite la nuova chiave API sono a pagamento, come tutte le altre sulla rete. 
diff --git a/website/src/pages/it/subgraphs/developing/publishing/_meta.js b/website/src/pages/it/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/it/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/it/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/it/subgraphs/querying/_meta.js b/website/src/pages/it/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/it/subgraphs/querying/_meta.js +++ b/website/src/pages/it/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/ja/resources/_meta-titles.json b/website/src/pages/ja/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/ja/resources/_meta-titles.json +++ b/website/src/pages/ja/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/ja/resources/_meta.js b/website/src/pages/ja/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/ja/resources/_meta.js +++ b/website/src/pages/ja/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/ja/resources/release-notes/_meta.js 
b/website/src/pages/ja/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/ja/resources/release-notes/_meta.js rename to website/src/pages/ja/resources/migration-guides/_meta.js diff --git a/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ja/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 63% rename from website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/ja/resources/migration-guides/assemblyscript-migration-guide.mdx index a170ebec8cda..85f6903a6c69 100644 --- a/website/src/pages/mr/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ja/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -2,13 +2,13 @@ title: AssemblyScript Migration Guide --- -आत्तापर्यंत, सबग्राफ [असेंबलीस्क्रिप्टच्या पहिल्या आवृत्त्यांपैकी एक वापरत आहेत](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). शेवटी आम्ही [नवीन उपलब्ध](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) साठी समर्थन जोडले आहे! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 -हे सबग्राफ विकसकांना AS भाषा आणि मानक लायब्ररीची नवीन वैशिष्ट्ये वापरण्यास सक्षम करेल. +That will enable subgraph developers to use newer features of the AS language and standard library. -ही मार्गदर्शक आवृत्ती `0.22.0` खालील `graph-cli`/`graph-ts` वापरणाऱ्या प्रत्येकासाठी लागू आहे. तुम्‍ही आधीच त्‍याच्‍या पेक्षा वरच्‍या (किंवा समान) आवृत्‍तीवर असल्‍यास, तुम्‍ही असेंबली स्क्रिप्‍टची `0.19.10` आवृत्ती आधीच वापरत आहात 🙂 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. 
If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> टीप: `0.24.0` नुसार, सबग्राफ मॅनिफेस्टमध्ये निर्दिष्ट केलेल्या `apiVersion` वर अवलंबून `graph-node` दोन्ही आवृत्त्यांना समर्थन देऊ शकते. +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. ## Features @@ -106,7 +106,7 @@ let maybeValue = load()! // breaks in runtime if value is null maybeValue.aMethod() ``` -तुम्हाला कोणती निवड करायची याची खात्री नसल्यास, आम्ही नेहमी सुरक्षित आवृत्ती वापरण्याची शिफारस करतो. जर मूल्य अस्तित्वात नसेल तर तुम्ही तुमच्या सबग्राफ हँडलरमध्ये रिटर्नसह फक्त लवकर इफ स्टेटमेंट करू इच्छित असाल. +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. ### Variable Shadowing @@ -121,7 +121,7 @@ let a = a + b However now this isn't possible anymore, and the compiler returns this error: ```typescript -त्रुटी TS2451: ब्लॉक-स्कोप केलेले व्हेरिएबल 'a' पुन्हा घोषित करू शकत नाही +ERROR TS2451: Cannot redeclare block-scoped variable 'a' let a = a + b; ~~~~~~~~~~~~~ @@ -167,7 +167,7 @@ However this only works in two scenarios: - Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting on class inheritance (subclass → superclass) -उदाहरणे: +Examples: ```typescript // primitive casting @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -तुम्हाला फक्त शून्यता काढून टाकायची असल्यास, तुम्ही `as` ऑपरेटर (किंवा `व्हेरिएबल`) वापरणे सुरू ठेवू शकता, परंतु हे मूल्य शून्य असू शकत नाही याची खात्री करा., अन्यथा तो खंडित होईल. +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // remove nullability @@ -238,7 +238,7 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -शून्यता प्रकरणासाठी आम्ही [शून्यता तपासणी वैशिष्ट्य](https://www.assemblyscript.org/basics.html#nullability-checks) पाहण्याची शिफारस करतो, ते तुमचा कोड अधिक क्लीनर बनवेल 🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 Also we've added a few more static methods in some types to ease casting, they are: @@ -306,7 +306,7 @@ let somethingOrElse: string = data ? data : 'else' // compiles just fine :) ### Operator overloading with property access -जर तुम्ही (उदाहरणार्थ) रद्द करता येण्याजोगा प्रकार (प्रॉपर्टी ऍक्सेसमधून) नॉन-नलेबल असलेल्या प्रकाराची बेरीज करण्याचा प्रयत्न केला, तर असेंबलीस्क्रिप्ट कंपाइलर संकलित वेळेची त्रुटी चेतावणी देण्याऐवजी मूल्यांपैकी एक रद्द करण्यायोग्य आहे, तो संधी देऊन शांतपणे संकलित करतो. कोड रनटाइममध्ये खंडित होण्यासाठी. +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. ```typescript class BigInt extends Uint8Array { @@ -330,7 +330,7 @@ let wrapper = new Wrapper(y) wrapper.n = wrapper.n + x // doesn't give compile time errors as it should ``` -आम्ही यासाठी असेंबलीस्क्रिप्ट कंपायलरवर एक समस्या उघडली आहे, परंतु आत्ता तुम्ही तुमच्या सबग्राफ मॅपिंगमध्ये अशा प्रकारचे ऑपरेशन करत असल्यास, तुम्ही त्यापूर्वी शून्य तपासणी करण्यासाठी ते बदलले पाहिजेत. +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
```typescript let wrapper = new Wrapper(y) @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -तुम्हाला `total.amount` मूल्य सुरू केल्याची खात्री करणे आवश्यक आहे, कारण जर तुम्ही बेरीजसाठी शेवटच्या ओळीत प्रवेश करण्याचा प्रयत्न केला तर ते क्रॅश होईल. तर तुम्ही एकतर ते प्रथम आरंभ करा: +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -किंवा या मालमत्तेसाठी रद्द करता येणारा प्रकार न वापरण्यासाठी तुम्ही फक्त तुमचा GraphQL स्कीमा बदलू शकता, नंतर आम्ही ते `codegen` पायरीवर शून्य म्हणून सुरू करू 😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -451,7 +451,7 @@ export class Something { ### Array initialization -`अॅरे` वर्ग अजूनही सूचीची लांबी सुरू करण्यासाठी संख्या स्वीकारतो, तथापि तुम्ही काळजी घेतली पाहिजे कारण `.push` सारखी ऑपरेशन्स सुरुवातीला जोडण्याऐवजी आकार वाढवतील., उदाहरणार्थ: +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -असेंबलीस्क्रिप्ट आवृत्त्यांमधील शून्यता भिन्नतेमुळे हे बदलले आणि ते `src/generated/schema.ts` फाइलशी संबंधित आहे (डिफॉल्ट मार्ग, तुम्ही कदाचित हे बदलले असेल). +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
### Other - Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- दोन्ही ऑपरेंड पूर्णांक असल्यास `**` बायनरी ऑपरेशनचा परिणाम आता सामान्य भाजक पूर्णांक आहे. पूर्वी, परिणाम `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) वर कॉल केल्यासारखा फ्लोट होता +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- `i8`/`u8` किंवा `i16`/`u16` प्रकाराचे लहान पूर्णांक मूल्य हलवताना, फक्त 3 अनुक्रमे 4 किमान RHS मूल्याचे महत्त्वपूर्ण बिट्स परिणामावर परिणाम करतात, `i32.shl` च्या परिणामाप्रमाणेच RHS मूल्याच्या 5 सर्वात कमी महत्त्वपूर्ण बिट्सवर परिणाम होतो. उदाहरण: `someI8 << 8` ने पूर्वी `0` मूल्य तयार केले होते, परंतु आता `someI8` तयार करते कारण RHS ला `8 & 7 = 0` (3 बिट) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- जेव्हा आकार भिन्न असतात तेव्हा रिलेशनल स्ट्रिंग तुलनांचे दोष निराकरण ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ja/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/ja/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/ja/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. 
Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). 
+ +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. + +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. 
+query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) { + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
+ +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. +query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. 
+query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Valid, because the unused fragment `AllFields` is removed. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must have a selection set specified. +- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) 
{ + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. + +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. 
+ +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index b004e14d9f98..000000000000 --- a/website/src/pages/ja/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL 検証移行ガイド ---- - -まもなく「graph-node」は [GraphQL Validations 仕様](https://spec.graphql.org/June2018/#sec-Validation) を 100% カバーします。 - -以前のバージョンの「graph-node」は、すべての検証をサポートしておらず、より適切な応答を提供していました。そのため、あいまいな場合、「graph-node」は無効な GraphQL 操作コンポーネントを無視していました。 - -GraphQL Validations サポートは、今後の新機能と The Graph Network の大規模なパフォーマンスの柱です。 - -また、The Graph Network の重要な要件であるクエリ応答の決定性も保証されます。 - -**GraphQL Validations を有効にすると、The Graph API に送信された既存のクエリの一部が壊れます**。 - -これらの検証に準拠するには、移行ガイドに従ってください。 - -> ⚠️ 検証がロールアウトされる前にクエリを移行しないと、エラーが返され、フロントエンド/クライアントが壊れる可能性があります。 - -## 移行ガイド - -CLI 移行ツールを使用して、GraphQL 操作の問題を見つけて修正できます。または、GraphQL クライアントのエンドポイントを更新して、`https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` エンドポイントを使用することもできます。このエンドポイントに対してクエリをテストすると、クエリの問題を見つけるのに役立ちます。 - -> [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) または [GraphQL Code Generator](https://the-guild.dev) を使用している場合、すべてのサブグラフを移行する必要はありません。 /graphql/codegen)、クエリが有効であることを既に確認しています。 - -## 移行 CLI ツール - -**GraphQL 操作エラーのほとんどは、事前にコードベースで見つけることができます。** - -このため、開発中または CI で GraphQL 操作を検証するためのスムーズなエクスペリエンスを提供します。 - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) は、特定のスキーマに対して GraphQL 操作を検証するのに役立つシンプルな CLI ツールです。 - -### **入門** - -ツールは次のように実行できます。 - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**ノート:** - -- $GITHUB_USER、$SUBGRAPH_NAME を適切な値に設定または置き換えます。のように: 
[`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- 提供されているプレビュー スキーマ URL (https://api-next.thegraph.com/) は大幅にレート制限されており、すべてのユーザーが新しいバージョンに移行すると廃止されます。 **本番環境では使用しないでください。** -- 操作は、次の拡張子を持つファイルで識別されます [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` オプション)。 - -### CLI 出力 - -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI ツールは、GraphQL 操作エラーを次のように出力します。 - -![CLIからのエラー出力](https://i.imgur.com/x1cBdhq.png) - -エラーごとに、説明、ファイル パスと位置、および解決例へのリンクが表示されます (次のセクションを参照)。 - -## プレビュー スキーマに対してローカル クエリを実行する - -検証がオンになっている「graph-node」バージョンを実行するエンドポイント「https://api-next.thegraph.com/」を提供します。 - -クエリを次の宛先に送信して試すことができます。 - -- `https://api-next.thegraph.com/subgraphs/id/` - -または - -- `https://api-next.thegraph.com/subgraphs/name//` - -検証エラーがあるとフラグが立てられたクエリを処理するには、Altair や [GraphiQL](https://cloud.hasura.io/public/graphiql) などの好きな GraphQL クエリ ツールを使用して、クエリを試してみてください。これらのツールは、実行前であっても、UI でこれらのエラーをマークします。 - -## 問題を解決する方法 - -以下に、既存の GraphQL 操作で発生する可能性があるすべての GraphQL 検証エラーを示します。 - -### GraphQL の変数、操作、フラグメント、または引数は一意である必要があります - -操作に GraphQL 変数、操作、フラグメント、および引数の一意のセットが含まれるようにするためのルールを適用しました。 - -GraphQL 操作は、あいまいさが含まれていない場合にのみ有効です。 - -これを実現するには、GraphQL 操作の一部のコンポーネントが一意でなければならないことを確認する必要があります。 - -これらの規則に違反するいくつかの無効な操作の例を次に示します。 - -**クエリ名が重複しています (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_解決:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**フラグメント名の重複 (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. 
-query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_解決:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**重複した変数名 (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_解決:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**引数名が重複しています (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_解決:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**重複した匿名クエリ (#LoneAnonymousOperationRule)** - -また、2 つの匿名操作を使用すると、応答構造の競合により、「LoneAnonymousOperation」ルールに違反します。 - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_解決:_ - -```graphql -query { - someField - otherField -} -``` - -または、2 つのクエリに名前を付けます。 - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### 重複するフィールド - -GraphQL 選択セットは、最終的な結果セットを正しく解決する場合にのみ有効と見なされます。 - -特定の選択セットまたはフィールドが、選択されたフィールドまたは使用された引数のいずれかによってあいまいさを生み出す場合、GraphQL サービスは操作の検証に失敗します。 - -この規則に違反する無効な操作の例をいくつか示します。 - -**競合するフィールド エイリアス (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. 
-query { - dogs { - name: nickname - name - } -} -``` - -_解決:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**引数を持つフィールドの競合 (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_解決:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -また、より複雑なユースケースでは、最終的に予想されるセットで競合を引き起こす可能性のある 2 つのフラグメントを使用して、この規則に違反する可能性があります。 - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -それに加えて、`@skip` や `@include` などのクライアント側の GraphQL ディレクティブは、あいまいさにつながる可能性があります。次に例を示します。 - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[アルゴリズムの詳細については、こちらをご覧ください](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### 未使用の変数またはフラグメント - -GraphQL 操作も、操作で定義されたすべてのコンポーネント (変数、フラグメント) が使用されている場合にのみ有効と見なされます。 - -これらのルールに違反する GraphQL 操作の例をいくつか示します: - -**未使用の変数** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_解決:_ - -```graphql -query something { - someData -} -``` - -**未使用のフラグメント** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_解決:_ - -```graphql -# Invalid, because fragment AllFields is never used. 
-query something { - someData -} - -# remove the `AllFields` fragment -``` - -### 無効または欠落している選択セット (#ScalarLeafsRule) - -また、GraphQL フィールドの選択は、以下が検証された場合にのみ有効です: - -- オブジェクト フィールドには選択セットが指定されている必要があります。 -- エッジ フィールド (スカラー、列挙型) には、選択セットが指定されていてはなりません。 - -次のスキーマでこれらの規則に違反する例をいくつか示します: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**無効な選択セット** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_解決:_ - -```graphql -query { - user { - id - } -} -``` - -**選択セットがありません** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_解決:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### 引数の値が正しくない (#VariablesInAllowedPositionRule) - -ハードコーディングされた値を引数に渡す GraphQL 操作は、スキーマで定義された値に基づいて有効である必要があります。 - -これらの規則に違反する無効な操作の例をいくつか示します: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### 不明な型、変数、フラグメント、またはディレクティブ (#UnknownX) - -不明なタイプ、変数、フラグメント、またはディレクティブが使用されている場合、GraphQL API はエラーを発生させます。 - -これらの不明な参照は修正する必要があります: - -- タイプミスだった場合の名前の変更 -- それ以外の場合は、削除します - -### フラグメント: 無効なスプレッドまたは定義 - -**無効なフラグメント スプレッド (#PossibleFragmentSpreadsRule)** - -Fragment は、適用できない型に展開できません。 - -たとえば、`Cat` フラグメントを `Dog` タイプに適用することはできません。 - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... 
-} -``` - -**無効なフラグメント定義 (#FragmentsOnCompositeTypesRule)** - -すべての Fragment は、(`on ...` を使用して) 複合型、つまり、オブジェクト、インターフェイス、またはユニオンで定義する必要があります。 - -スカラーでのフラグメントの定義は無効であるため、次の例は無効です。 - -```graphql -ragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### ディレクティブの使用 - -**ディレクティブはこの場所では使用できません (#KnownDirectivesRule)** - -The Graph API でサポートされている GraphQL ディレクティブ (`@...`) のみを使用できます。 - -以下は、GraphQL がサポートするディレクティブの例です: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -\_注: `@stream`、`@live`、`@defer` はサポートされていません。 - -**ディレクティブは、この場所で 1 回だけ使用できます (#UniqueDirectivesPerLocationRule)** - -The Graph でサポートされているディレクティブは、場所ごとに 1 回だけ使用できます。 - -以下は無効です (そして冗長です): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ja/resources/subgraph-studio-faq.mdx similarity index 53% rename from website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx rename to website/src/pages/ja/resources/subgraph-studio-faq.mdx index b5b8cede7888..8761f7a31bf6 100644 --- a/website/src/pages/de/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ b/website/src/pages/ja/resources/subgraph-studio-faq.mdx @@ -1,31 +1,31 @@ --- -title: Subgraph Studio-FAQs +title: Subgraph Studio FAQs --- -## 1. Was ist Subgraph Studio? +## 1. What is Subgraph Studio? -[Subgraph Studio](https://thegraph.com/studio/) ist eine DApp zum Erstellen, Verwalten und Veröffentlichen von Subgrafen und API-Schlüsseln. +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. -## 2. Wie erstelle ich einen API-Schlüssel? +## 2. How do I create an API Key? 
To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. -## 3. Kann ich mehrere API-Schlüssel erstellen? +## 3. Can I create multiple API Keys? Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. How do I restrict a domain for an API Key? -Nachdem Sie einen API-Schlüssel erstellt haben, können Sie im Abschnitt Sicherheit die Domänen definieren, die einen bestimmten API-Schlüssel abfragen können. +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. -## 5. Kann ich meinen Subgrafen an einen anderen Eigentümer übertragen? +## 5. Can I transfer my subgraph to another owner? Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. -Beachten Sie, dass Sie den Subgrafen nach der Übertragung nicht mehr in Studio sehen oder bearbeiten können. +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. ## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. -Denken Sie daran, dass Sie einen API-Schlüssel erstellen und jeden im Netzwerk veröffentlichten Subgrafen abfragen können, auch wenn Sie selbst einen Subgrafen erstellen. 
Diese Abfragen über den neuen API-Schlüssel sind wie alle anderen im Netzwerk kostenpflichtige Abfragen. +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. diff --git a/website/src/pages/ja/subgraphs/_meta-titles.json b/website/src/pages/ja/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ja/subgraphs/_meta-titles.json +++ b/website/src/pages/ja/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ja/subgraphs/_meta.js b/website/src/pages/ja/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ja/subgraphs/_meta.js +++ b/website/src/pages/ja/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ja/subgraphs/best-practices/_meta.js b/website/src/pages/ja/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ja/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/ja/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: 
Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in-memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/ja/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/ja/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ja/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/best-practices/pruning.mdx b/website/src/pages/ja/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/best-practices/timeseries.mdx b/website/src/pages/ja/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/ja/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/_meta.js b/website/src/pages/ja/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ja/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ja/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index ce3e717c103a..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### 概要 - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## その他のリソース - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/pruning.mdx b/website/src/pages/ja/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 6ebf5e48a235..000000000000 --- a/website/src/pages/ja/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## 概要 - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -例: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -例: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -例: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -例: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ja/subgraphs/developing/deploying/_meta.js b/website/src/pages/ja/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ja/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ja/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 18fa824a4d06..000000000000 --- a/website/src/pages/ja/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: サブグラフスタジオFAQ ---- - -## 1. サブグラフスタジオとは? - -[Subgraph Studio](https://thegraph.com/studio/)は、サブグラフやAPIキーを作成・管理・公開するためのDappであり、サブグラフの作成・管理・公開を行う。 - -## 2. API キーを作成するにはどうすればよいですか? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. 複数の API キーを作成できますか? - -A: はい、できます。異なるプロジェクトで使用するために、[こちら](https://thegraph.com/studio/apikeys/)のリンクをご確認ください。 - -## 4. API キーのドメインを制限するにはどうすればよいですか? - -API キーを作成後、「セキュリティ」セクションで、特定の API キーにクエリ可能なドメインを定義できます。 - -## 5. 自分のサブグラフを他のオーナーに譲渡することはできますか? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -サブグラフが転送されると、Studio でサブグラフを表示または編集できなくなることに注意してください。 - -## 6. 使用したいサブグラフの開発者ではない場合、サブグラフのクエリ URL を見つけるにはどうすればよいですか? 
- -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -APIキーを作成すると、自分でサブグラフを構築した場合でも、ネットワークに公開されているすべてのサブグラフにクエリを実行できることを覚えておいてください。新しい API キーを介したこれらのクエリは、ネットワーク上の他のクエリと同様に支払われます。 diff --git a/website/src/pages/ja/subgraphs/developing/publishing/_meta.js b/website/src/pages/ja/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ja/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ja/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ja/subgraphs/querying/_meta.js b/website/src/pages/ja/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ja/subgraphs/querying/_meta.js +++ b/website/src/pages/ja/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/ko/resources/_meta-titles.json b/website/src/pages/ko/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/ko/resources/_meta-titles.json +++ b/website/src/pages/ko/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/ko/resources/_meta.js 
b/website/src/pages/ko/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/ko/resources/_meta.js +++ b/website/src/pages/ko/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/ko/resources/release-notes/_meta.js b/website/src/pages/ko/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/ko/resources/release-notes/_meta.js rename to website/src/pages/ko/resources/migration-guides/_meta.js diff --git a/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ko/resources/migration-guides/assemblyscript-migration-guide.mdx similarity index 53% rename from website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx rename to website/src/pages/ko/resources/migration-guides/assemblyscript-migration-guide.mdx index 766fbb6c80a3..85f6903a6c69 100644 --- a/website/src/pages/ja/resources/release-notes/assemblyscript-migration-guide.mdx +++ b/website/src/pages/ko/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -1,18 +1,18 @@ --- -title: AssemblyScript マイグレーションガイド +title: AssemblyScript Migration Guide --- -これまでサブグラフは、[AssemblyScript の最初のバージョン](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6)を使用していました。 ついに[最新のバージョン](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10)(v0.19.10) のサポートを追加しました! 🎉 +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 
🎉 -これにより、サブグラフの開発者は、AS 言語と標準ライブラリの新しい機能を使用できるようになります。 +That will enable subgraph developers to use newer features of the AS language and standard library. -このガイドは、バージョン`0.22.0`以下の`graph-cli`/`graph-ts` をお使いの方に適用されます。 もしあなたがすでにそれ以上のバージョンにいるなら、あなたはすでに AssemblyScript のバージョン`0.19.10` を使っています。 +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 -> 注:`0.24.0`以降、`graph-node`はサブグラフマニフェストで指定された`apiVersion`に応じて、両方のバージョンをサポートしています。 +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. -## 特徴 +## Features -### 新機能 +### New functionality - `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) - New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) @@ -30,21 +30,21 @@ title: AssemblyScript マイグレーションガイド - Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) - Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### 最適化 +### Optimizations - `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) - Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) - Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) - Optimize for powers of two 
in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### その他 +### Other - The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) - Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## アップグレードの方法 +## How to upgrade? -1. `subgraph.yaml`のマッピングの`apiVersion`を`0.0.6`に変更してください。 +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. 使用している`graph-cli`を`最新版`に更新するには、次のように実行します。 +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,20 +66,20 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. `graph-ts`についても同様ですが、グローバルにインストールするのではなく、メインの依存関係に保存します。 +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. ガイドの残りの部分に従って、言語の変更を修正します。 -5. `codegen`を実行し、再度`deploy`します。 +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. -## 変更点 +## Breaking changes -### ヌル可能性 +### Nullability -古いバージョンの AssemblyScript では、以下のようなコードを作ることができました: +On the older version of AssemblyScript, you could create code like this: ```typescript function load(): Value | null { ... } @@ -88,7 +88,7 @@ let maybeValue = load(); maybeValue.aMethod(); ``` -しかし、新しいバージョンでは、値が nullable であるため、次のようにチェックする必要があります: +However on the newer version, because the value is nullable, it requires you to check, like this: ```typescript let maybeValue = load() @@ -98,7 +98,7 @@ if (maybeValue) { } ``` -あるいは、次のように強制します: +Or force it like this: ```typescript let maybeValue = load()! // breaks in runtime if value is null @@ -106,19 +106,19 @@ let maybeValue = load()! 
// breaks in runtime if value is null maybeValue.aMethod() ``` -どちらを選択すべきか迷った場合は、常に安全なバージョンを使用することをお勧めします。 値が存在しない場合は、サブグラフハンドラの中で return を伴う初期の if 文を実行するとよいでしょう。 +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. -### 変数シャドウイング +### Variable Shadowing -以前は、[変数のシャドウイング](https://en.wikipedia.org/wiki/Variable_shadowing)を行うことができ、次のようなコードが動作していました。 +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript -et a = 10 +let a = 10 let b = 20 let a = a + b ``` -しかし、現在はこれができなくなり、コンパイラは次のようなエラーを返します。 +However now this isn't possible anymore, and the compiler returns this error: ```typescript ERROR TS2451: Cannot redeclare block-scoped variable 'a' @@ -128,11 +128,11 @@ ERROR TS2451: Cannot redeclare block-scoped variable 'a' in assembly/index.ts(4,3) ``` -変数シャドウイングを行っていた場合は、重複する変数の名前を変更する必要があります。 +You'll need to rename your duplicate variables if you had variable shadowing. -### Null 比較 +### Null Comparisons -サブグラフのアップグレードを行うと、時々以下のようなエラーが発生することがあります。 +By doing the upgrade on your subgraph, sometimes you might get errors like these: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -解決するには、 `if` 文を以下のように変更するだけです。 +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -151,23 +151,23 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i if (decimals === null) { ``` -== の代わりに != を実行している場合も同様です。 +The same applies if you're doing != instead of ==. 
-### 鋳造 +### Casting -以前の一般的なキャストの方法は、次のように`as`キーワードを使うだけでした。 +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` -しかし、これは 2 つのシナリオでしか機能しません。 +However this only works in two scenarios: -- プリミティブなキャスト(between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- クラス継承のアップキャスティング(サブクラス → スーパークラス) +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) -例 +Examples: ```typescript // primitive casting @@ -184,10 +184,10 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -キャストしたくても、`as`/`var`を使うと**安全ではない**というシナリオが 2 つあります。 +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: -- クラス継承のダウンキャスト(スーパークラス → サブクラス) -- スーパークラスを共有する 2 つの型の間 +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass ```typescript // downcasting on class inheritance @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -このような場合には、`changetype`関数を使用します。 +For those cases, you can use the `changetype` function: ```typescript // downcasting on class inheritance @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -単に null 性を除去したいだけなら、`as` オペレーター(or `variable`)を使い続けることができますが、値が null ではないことを確認しておかないと壊れてしまいます。 +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
```typescript // remove nullability @@ -238,18 +238,18 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -Nullability については、[nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks)を利用することをお勧めします。それはあなたのコードをよりきれいにします🙂 +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 -また、キャストを容易にするために、いくつかの型にスタティックメソッドを追加しました。 +Also we've added a few more static methods in some types to ease casting, they are: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### プロパティアクセスによる Nullability チェック +### Nullability check with property access -[nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks)を使用するには、次のように`if`文や三項演算子(`?` and `:`) を使用します。 +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -しかし、これは、以下のように、プロパティのアクセスではなく、変数に対して`if`/ternary を行っている場合にのみ機能します。 +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -277,10 +277,10 @@ class Container { let container = new Container() container.data = 'data' -let somethingOrElse: string = container.data ? +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile ``` -すると、このようなエラーが出力されます。 +Which outputs this error: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. 
@@ -289,7 +289,7 @@ ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/s ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``` -この問題を解決するには、そのプロパティアクセスのための変数を作成して、コンパイラが nullability check のマジックを行うようにします。 +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: ```typescript class Container { @@ -301,12 +301,12 @@ container.data = 'data' let data = container.data -let somethingOrElse: string = data :) +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) ``` -### プロパティアクセスによるオペレーターオーバーロード +### Operator overloading with property access -たとえば、(プロパティ アクセスからの) null 許容型と null 非許容型を合計しようとすると、AssemblyScript コンパイラは、値の 1 つが null 許容であるというコンパイル時のエラー警告を表示する代わりに、黙ってコンパイルします。実行時にコードが壊れる可能性を与えます。 +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. ```typescript class BigInt extends Uint8Array { @@ -330,7 +330,7 @@ let wrapper = new Wrapper(y) wrapper.n = wrapper.n + x // doesn't give compile time errors as it should ``` -この件に関して、アセンブリ・スクリプト・コンパイラーに問題を提起しましたが、 今のところ、もしサブグラフ・マッピングでこの種の操作を行う場合には、 その前に NULL チェックを行うように変更してください。 +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
```typescript let wrapper = new Wrapper(y) @@ -342,9 +342,9 @@ if (!wrapper.n) { wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt ``` -### 値の初期化 +### Value initialization -もし、このようなコードがあった場合: +If you have any code like this: ```typescript var value: Type // null @@ -352,7 +352,7 @@ value.x = 10 value.y = 'content' ``` -これは、値が初期化されていないために起こります。したがって、次のようにサブグラフが値を初期化していることを確認してください。 +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: ```typescript var value = new Type() // initialized @@ -360,7 +360,7 @@ value.x = 10 value.y = 'content' ``` -また、以下のように GraphQL のエンティティに Nullable なプロパティがある場合も同様です。 +Also if you have nullable properties in a GraphQL entity, like this: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -そして、以下のようなコードになります: +And you have code similar to this: ```typescript let total = Total.load('latest') @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -`total.amount`の値を確実に初期化する必要があります。なぜなら、最後の行の sum のようにアクセスしようとすると、クラッシュしてしまうからです。 そのため、最初に初期化する必要があります。 +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. 
So you either initialize it first: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -あるいは、このプロパティに nullable 型を使用しないように GraphQL スキーマを変更することもできます。そうすれば、`コード生成`の段階でゼロとして初期化されます。😉 +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 ```graphql type Total @entity { @@ -413,9 +413,9 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -### クラスのプロパティの初期化 +### Class property initialization -以下のように、他のクラス(自分で宣言したものや標準ライブラリで宣言したもの)のプロパティを持つクラスをエクスポートした場合、そのクラスのプロパティを初期化します: +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: ```typescript class Thing {} @@ -425,7 +425,7 @@ export class Something { } ``` -コンパイラがエラーになるのは、クラスであるプロパティにイニシャライザを追加するか、`!` オペレーターを追加する必要があるからです。 +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -449,9 +449,9 @@ export class Something { } ``` -### 配列の初期化 +### Array initialization -`Array`クラスは、リストの長さを初期化するための数値を依然として受け取ります。しかし、例えば`.push`のような操作は、先頭に追加するのではなく、実際にサイズを大きくするので、注意が必要です。 +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -459,13 +459,13 @@ let arr = new Array(5) // ["", "", "", "", ""] arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ``` -使用している型(例えば null 可能な型) とそのアクセス方法によっては、次のようなランタイムエラーに遭遇する可能性があります。 +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: ``` ERRO Handler skipped due to execution failure, 
error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -実際に最初にプッシュするには、以下のように、サイズゼロの `Array`を初期化する必要があります: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -473,7 +473,7 @@ let arr = new Array(0) // [] arr.push('something') // ["something"] ``` -あるいは、インデックス経由で変異させるべきでしょう: +Or you should mutate it via index: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -481,11 +481,11 @@ let arr = new Array(5) // ["", "", "", "", ""] arr[0] = 'something' // ["something", "", "", "", ""] ``` -### GraphQLスキーマ +### GraphQL schema -これは直接のAssemblyScriptの変更ではありませんが、`schema.graphql` ファイルを更新する必要があるかもしれません。 +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. -この変更により、Non-Nullable Listのフィールドを型に定義することができなくなりました。仮に、以下のようなスキーマがあった場合: +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: ```graphql type Something @entity { @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -List タイプのメンバーには、以下のように`!` を付ける必要があります: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -これはAssemblyScriptのバージョンによるNullabilityの違いで変わったもので、`src/generated/schema.ts`ファイル(デフォルトパス、変更されているかもしれません)に関連しています。 +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
-### その他 +### Other - Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- タイプ `i8`/`u8` または `i16`/`u16` の小さな整数値をシフトする場合、最小の 3 つ、それぞれ 4 つだけRHS 値の上位 5 ビットのみが影響を受ける `i32.shl` の結果と同様に、RHS 値の有効ビットが結果に影響します。例: `someI8 << 8` は以前は値 `0` を生成していましたが、RHS を `8 & 7 = 0` としてマスクするため、`someI8` を生成するようになりました。(3 ビット) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) - Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ko/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/ko/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/ko/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. + +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. 
Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). 
+ +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. + +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. 
+query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) { + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
+ +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. +query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. 
+query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. +- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) 
{ + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. + +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. 
+ +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/ko/resources/subgraph-studio-faq.mdx b/website/src/pages/ko/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/ko/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. 
You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries as any other on the network. diff --git a/website/src/pages/ko/subgraphs/_meta-titles.json b/website/src/pages/ko/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ko/subgraphs/_meta-titles.json +++ b/website/src/pages/ko/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ko/subgraphs/_meta.js b/website/src/pages/ko/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ko/subgraphs/_meta.js +++ b/website/src/pages/ko/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ko/subgraphs/best-practices/_meta.js b/website/src/pages/ko/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ko/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/ko/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title:
Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however, it is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events.
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6.
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/ko/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/ko/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ko/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/best-practices/pruning.mdx b/website/src/pages/ko/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3.
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/best-practices/timeseries.mdx b/website/src/pages/ko/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/ko/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
 + +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/_meta.js b/website/src/pages/ko/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ko/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ko/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/ko/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/ko/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/ko/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/cookbook/pruning.mdx b/website/src/pages/ko/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/ko/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ko/subgraphs/developing/deploying/_meta.js b/website/src/pages/ko/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ko/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ko/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ko/subgraphs/developing/publishing/_meta.js b/website/src/pages/ko/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ko/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ko/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ko/subgraphs/querying/_meta.js b/website/src/pages/ko/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ko/subgraphs/querying/_meta.js +++ b/website/src/pages/ko/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/mr/resources/_meta-titles.json b/website/src/pages/mr/resources/_meta-titles.json index 
8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/mr/resources/_meta-titles.json +++ b/website/src/pages/mr/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/mr/resources/_meta.js b/website/src/pages/mr/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/mr/resources/_meta.js +++ b/website/src/pages/mr/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/mr/resources/release-notes/_meta.js b/website/src/pages/mr/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/mr/resources/release-notes/_meta.js rename to website/src/pages/mr/resources/migration-guides/_meta.js diff --git a/website/src/pages/mr/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/mr/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/mr/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. + +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. 
If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` 
([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. 
Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. + if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. 
 + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// <Uint8Array>bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// <Bytes>uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// <ByteArray>bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype<T>` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype<Bytes>(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype<ByteArray>(bytes) // works :) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break. 
+ +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
 + +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/mr/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/mr/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/mr/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
 **Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/<Qm...>` + +or + +- `https://api-next.thegraph.com/subgraphs/name/<GITHUB_USER>/<SUBGRAPH_NAME>` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/mr/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/mr/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index b0540fd8bbc8..000000000000 --- a/website/src/pages/mr/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). 
- -`graph-node` च्या मागील आवृत्त्यांनी सर्व प्रमाणीकरणांना समर्थन दिले नाही आणि अधिक सुंदर प्रतिसाद दिले - म्हणून, संदिग्धतेच्या बाबतीत, `graph-node` अवैध GraphQL ऑपरेशन घटकांकडे दुर्लक्ष करत आहे. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## स्थलांतर मार्गदर्शक - -तुमच्या GraphQL ऑपरेशन्समधील समस्या शोधण्यासाठी आणि त्यांचे निराकरण करण्यासाठी तुम्ही CLI माइग्रेशन टूल वापरू शकता. वैकल्पिकरित्या तुम्ही `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` एंडपॉइंट वापरण्यासाठी तुमच्या GraphQL क्लायंटचा एंडपॉइंट अपडेट करू शकता. या एंडपॉइंटवर तुमच्या क्वेरींची चाचणी केल्याने तुम्हाला तुमच्या क्वेरींमधील समस्या शोधण्यात मदत होईल. - -> तुम्ही [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) किंवा [GraphQL कोड जनरेटर](https://the-guild.dev) वापरत असल्यास, सर्व उपग्राफ स्थलांतरित करण्याची गरज नाही /graphql/codegen), ते तुमच्या क्वेरी वैध असल्याची खात्री करतात. - -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) हे एक साधे CLI साधन आहे जे दिलेल्या स्कीमावर GraphQL ऑपरेशन्स प्रमाणित करण्यात मदत करते. 
- -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- योग्य मूल्यांसह $GITHUB_USER, $SUBGRAPH_NAME सेट किंवा पुनर्स्थित करा. जसे: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- प्रिव्ह्यू स्कीमा URL (https://api-next.thegraph.com/) प्रदान केली आहे हे खूप प्रमाणात मर्यादित आहे आणि सर्व वापरकर्ते नवीन आवृत्तीवर स्थलांतरित झाल्यावर सूर्यास्त होईल. **उत्पादनात वापरू नका.** -- खालील विस्तार [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx सह फायलींमध्ये ऑपरेशन ओळखले जातात `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` पर्याय). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). - -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. - -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -किंवा - -- `https://api-next.thegraph.com/subgraphs/name//` - -प्रमाणीकरण त्रुटी म्हणून ध्वजांकित केलेल्या क्वेरींवर कार्य करण्यासाठी, तुम्ही तुमचे आवडते GraphQL क्वेरी टूल वापरू शकता, जसे की Altair किंवा [GraphiQL](https://cloud.hasura.io/public/graphiql), आणि तुमची क्वेरी वापरून पहा. ती साधने तुम्ही चालवण्यापूर्वीच त्यांच्या UI मध्ये त्या त्रुटी देखील चिन्हांकित करतील. - -## समस्यांचे निराकरण कसे करावे - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
- -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. - -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### अवैध किंवा गहाळ निवड-सेट (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. 
-- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### तुकडा: अवैध स्प्रेड किंवा परिभाषा - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. 
- -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. - -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/mr/resources/subgraph-studio-faq.mdx b/website/src/pages/mr/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/mr/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. 
There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries as any other on the network.
diff --git a/website/src/pages/mr/subgraphs/_meta-titles.json b/website/src/pages/mr/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/mr/subgraphs/_meta-titles.json +++ b/website/src/pages/mr/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/mr/subgraphs/_meta.js b/website/src/pages/mr/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/mr/subgraphs/_meta.js +++ b/website/src/pages/mr/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/mr/subgraphs/best-practices/_meta.js b/website/src/pages/mr/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/mr/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/mr/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6.
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/mr/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/mr/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/mr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # uint256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/best-practices/pruning.mdx b/website/src/pages/mr/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/best-practices/timeseries.mdx b/website/src/pages/mr/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/mr/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/_meta.js b/website/src/pages/mr/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/mr/subgraphs/cookbook/_meta.js +++ b/website/src/pages/mr/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 871f930abd43..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### सविश्लेषण - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Additional Resources - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/pruning.mdx b/website/src/pages/mr/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index aef1fe57ac5f..000000000000 --- a/website/src/pages/mr/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## सविश्लेषण - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -उदाहरण: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -उदाहरण: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -उदाहरण: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -उदाहरण: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/mr/subgraphs/developing/deploying/_meta.js b/website/src/pages/mr/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/mr/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/mr/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index badcf3fee7c6..000000000000 --- a/website/src/pages/mr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: सबग्राफ स्टुडिओ FAQ ---- - -## 1. सबग्राफ स्टुडिओ म्हणजे काय? - -[सबग्राफ स्टुडिओ](https://thegraph.com/studio/) हे सबग्राफ आणि API की तयार करण्यासाठी, व्यवस्थापित करण्यासाठी आणि प्रकाशित करण्यासाठी एक डॅप आहे. - -## 2. मी API की कशी तयार करू? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. मी एकाधिक API की तयार करू शकतो? - -होय! तुम्ही वेगवेगळ्या प्रकल्पांमध्ये वापरण्यासाठी एकाधिक API की तयार करू शकता. लिंक [येथे](https://thegraph.com/studio/apikeys/) पहा. - -## 4. मी API की साठी डोमेन कसे प्रतिबंधित करू? - -API की तयार केल्यानंतर, सिक्युरिटी विभागात, तुम्ही डोमेन परिभाषित करू शकता जे विशिष्ट क्वेरी करू शकतात API. - -## 5. मी माझा सबग्राफ दुसर्‍या मालकाकडे हस्तांतरित करू शकतो का? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. 
You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -लक्षात ठेवा की एकदा स्‍टुडिओमध्‍ये सबग्राफ स्‍थानांतरित केल्‍यानंतर तुम्‍ही तो पाहू किंवा संपादित करू शकणार नाही. - -## 6. मला वापरायचा असलेल्या सबग्राफचा मी विकसक नसल्यास सबग्राफसाठी क्वेरी URL कसे शोधू? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -लक्षात ठेवा की तुम्ही API की तयार करू शकता आणि नेटवर्कवर प्रकाशित केलेल्या कोणत्याही सबग्राफची क्वेरी करू शकता, जरी तुम्ही स्वतः सबग्राफ तयार केला असला तरीही. नवीन API की द्वारे या क्वेरी, नेटवर्कवरील इतर कोणत्याही सशुल्क क्वेरी आहेत. diff --git a/website/src/pages/mr/subgraphs/developing/publishing/_meta.js b/website/src/pages/mr/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/mr/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/mr/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/mr/subgraphs/querying/_meta.js b/website/src/pages/mr/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/mr/subgraphs/querying/_meta.js +++ b/website/src/pages/mr/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git 
a/website/src/pages/nl/resources/_meta-titles.json b/website/src/pages/nl/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/nl/resources/_meta-titles.json +++ b/website/src/pages/nl/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/nl/resources/_meta.js b/website/src/pages/nl/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/nl/resources/_meta.js +++ b/website/src/pages/nl/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/nl/resources/release-notes/_meta.js b/website/src/pages/nl/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/nl/resources/release-notes/_meta.js rename to website/src/pages/nl/resources/migration-guides/_meta.js diff --git a/website/src/pages/nl/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/nl/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/nl/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'.
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it.
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt!
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/nl/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/nl/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/nl/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/nl/resources/subgraph-studio-faq.mdx b/website/src/pages/nl/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/nl/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. 
There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries as any other on the network.
diff --git a/website/src/pages/nl/subgraphs/_meta-titles.json b/website/src/pages/nl/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/nl/subgraphs/_meta-titles.json +++ b/website/src/pages/nl/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/nl/subgraphs/_meta.js b/website/src/pages/nl/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/nl/subgraphs/_meta.js +++ b/website/src/pages/nl/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/nl/subgraphs/best-practices/_meta.js b/website/src/pages/nl/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/nl/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/nl/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The `calls:` entry is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/nl/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/nl/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/nl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/best-practices/pruning.mdx b/website/src/pages/nl/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/best-practices/timeseries.mdx b/website/src/pages/nl/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/nl/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/_meta.js b/website/src/pages/nl/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/nl/subgraphs/cookbook/_meta.js +++ b/website/src/pages/nl/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/nl/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/nl/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/nl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/cookbook/pruning.mdx b/website/src/pages/nl/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/nl/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/nl/subgraphs/developing/deploying/_meta.js b/website/src/pages/nl/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/nl/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/nl/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/nl/subgraphs/developing/publishing/_meta.js b/website/src/pages/nl/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/nl/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/nl/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/nl/subgraphs/querying/_meta.js b/website/src/pages/nl/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/nl/subgraphs/querying/_meta.js +++ b/website/src/pages/nl/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/pl/resources/_meta-titles.json b/website/src/pages/pl/resources/_meta-titles.json index 
8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/pl/resources/_meta-titles.json +++ b/website/src/pages/pl/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/pl/resources/_meta.js b/website/src/pages/pl/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/pl/resources/_meta.js +++ b/website/src/pages/pl/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/pl/resources/release-notes/_meta.js b/website/src/pages/pl/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/pl/resources/release-notes/_meta.js rename to website/src/pages/pl/resources/migration-guides/_meta.js diff --git a/website/src/pages/pl/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/pl/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/pl/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. + +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. 
If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` 
([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. 
Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. + if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. 
+ +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works :) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
+ +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non-nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving a chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
 + +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized its values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access it like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/pl/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/pl/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/pl/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/pl/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/pl/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 4d909e8970a8..000000000000 --- a/website/src/pages/pl/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). 
- -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## Migration guide - -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. - -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. - -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. 
- -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). - -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. - -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -or - -- `https://api-next.thegraph.com/subgraphs/name//` - -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. - -## How to solve issues - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
- -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. - -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. 
-- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### Fragment: invalid spread or definition - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. 
- -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. - -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/pl/resources/subgraph-studio-faq.mdx b/website/src/pages/pl/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/pl/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. 
There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries made via the new API key are paid queries, like any other on the network.
diff --git a/website/src/pages/pl/subgraphs/_meta-titles.json b/website/src/pages/pl/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/pl/subgraphs/_meta-titles.json +++ b/website/src/pages/pl/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/pl/subgraphs/_meta.js b/website/src/pages/pl/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/pl/subgraphs/_meta.js +++ b/website/src/pages/pl/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/pl/subgraphs/best-practices/_meta.js b/website/src/pages/pl/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/pl/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/pl/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in-memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/pl/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/pl/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/best-practices/pruning.mdx b/website/src/pages/pl/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/best-practices/timeseries.mdx b/website/src/pages/pl/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/pl/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/_meta.js b/website/src/pages/pl/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/pl/subgraphs/cookbook/_meta.js +++ b/website/src/pages/pl/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/pl/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/pl/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/pl/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/cookbook/pruning.mdx b/website/src/pages/pl/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/pl/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pl/subgraphs/developing/deploying/_meta.js b/website/src/pages/pl/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/pl/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/pl/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/pl/subgraphs/developing/publishing/_meta.js b/website/src/pages/pl/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/pl/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/pl/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/pl/subgraphs/querying/_meta.js b/website/src/pages/pl/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/pl/subgraphs/querying/_meta.js +++ b/website/src/pages/pl/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/pt/resources/_meta-titles.json b/website/src/pages/pt/resources/_meta-titles.json index 
8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/pt/resources/_meta-titles.json +++ b/website/src/pages/pt/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/pt/resources/_meta.js b/website/src/pages/pt/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/pt/resources/_meta.js +++ b/website/src/pages/pt/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/pt/resources/release-notes/_meta.js b/website/src/pages/pt/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/pt/resources/release-notes/_meta.js rename to website/src/pages/pt/resources/migration-guides/_meta.js diff --git a/website/src/pages/pt/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/pt/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/pt/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. + +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. 
If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` 
([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. 
Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. + if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve it, you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. 
+ +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works :) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. 
+ +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non-nullable one, the AssemblyScript compiler, instead of giving a compile time error warning that one of the values is nullable, just compiles silently, giving a chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime; that happens because the value hasn't been initialized, so make sure your subgraph has initialized its values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access it like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/pt/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/pt/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/pt/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index ce410b9ed255..000000000000 --- a/website/src/pages/pt/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Guia de Migração do AssemblyScript ---- - -Até agora, os subgraphs têm usado uma das [primeiras versões do AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Finalmente, adicionamos apoio à versão [mais recente disponível](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v.0.19.10)! 🎉 - -Isto permitirá que os programadores de subgraph usem recursos mais novos da linguagem AS e da sua biblioteca normal. - -Este guia se aplica a quem usar o `graph-cli`/`graph-ts` antes da versão `0.22.0`. Se já está numa versão maior (ou igual) àquela, já está a usar a versão `0.19.10` do AssemblyScript 🙂 - -> Nota: Desde a versão `0.24.0`, o `graph-node` pode apoiar ambas as versões, dependente da `apiVersion` especificada no manifest do subgraph. - -## Recursos - -### Novas funcionalidades - -- `TypedArray`s podem ser construídos de `ArrayBuffer`s com o novo método estático [ `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Novas funções de biblioteca normais: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` e `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Suporte para x GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- `StaticArray`, uma variante de arranjo mais eficiente ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementado o argumento `radix` no `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Suporte para separadores em literais de ponto flutuante ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Suporte para funções de primeira classe ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Embutidos: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- 
`Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Suporte para strings literais de modelos ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- `encodeURI(Component)` e `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- `toString`, `toDateString` e `toTimeString` ao `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- `toUTCString` para a `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Tipo embutido `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Otimizações - -- Funções `Math` como `exp`, `exp2`, `log`, `log2` e `pow` foram substituídas por variantes mais rápidas ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Otimizado levemente o `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cacheing de mais acessos de campos em Map e Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Otimização para poderes de dois no `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Outros - -- O tipo de um literal de arranjos agora pode ser inferido dos seus conteúdos ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- stdlib atualizado ao Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Como atualizar? - -1. Mude os seus mapeamentos de `apiVersion` no `subgraph.yaml` para `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. 
Atualize o `graph-cli` que usa à versão mais recente (`latest`) com: - -```bash -# caso o tenha instalado globalmente -npm install --global @graphprotocol/graph-cli@latest - -# ou no seu subgraph se o tiver como dependência de desenvolvimento -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Faça o mesmo para o `graph-ts`, mas em vez de instalar globalmente, salve-o nas suas dependências principais: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Siga o resto do guia para consertar as mudanças frágeis na linguagem. -5. Execute `codegen` e `deploy` novamente. - -## Breaking changes (mudanças frágeis) - -### Anulabilidade - -Na versão mais antiga do AssemblyScript, podias criar códigos assim: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Mas na versão mais nova, como o valor é anulável, ele exige que confira, assim: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -...ou o force deste jeito: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -Se não tiver certeza de qual escolher, é sempre bom usar a versão segura. Se o valor não existir, pode fazer uma declaração `if` precoce com um retorno no seu handler de subgraph. - -### Sombreamento Varíavel - -Antes, ao fazer [sombreamentos variáveis](https://en.wikipedia.org/wiki/Variable_shadowing), códigos assim funcionavam bem: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Porém, isto não é mais possível, e o compilador retorna este erro: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Renomeie as suas variáveis duplicadas, se tinha o sombreamento variável. 
- -### Comparações de Nulos - -Ao fazer a atualização no seu subgraph, às vezes aparecem erros como este: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Para resolver isto, basta mudar a declaração `if` para algo assim: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -O mesmo acontece se fizer o != em vez de ==. - -### Casting (Conversão de tipos) - -Antigamente, casting era normalmente feito com a palavra-chave `as`, assim: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Porém, isto só funciona em dois casos: - -- Casting primitivo (entre tipos como `u8`, `i32`, `bool`; por ex.: `let b: isize = 10; b as usize`); -- Upcasting em herança de classe (subclass → superclass) - -Exemplos: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting em herança de classe -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // mesmo que: bytes como Uint8Array -``` - -Há dois cenários onde casting é possível, mas usar `as`/`var` **não é seguro**: - -- Downcasting em herança de classe (superclass → subclass) -- Entre dois tipos que compartilham uma superclasse - -```typescript -// downcasting em herança de classe -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // quebra no runtime :( -``` - -```typescript -// entre dois tipos que compartilham uma superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // quebra no runtime :( -``` - -Nestes casos, vale usar a função `changetype`: - -```typescript -// downcasting em herança de classe -class Bytes extends 
Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // funciona :) -``` - -```typescript -// entre dois tipos que compartilham uma superclasse -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // funciona :) -``` - -Se só quiser tirar a anulabilidade, pode continuar a usar o operador `as` (ou `variable`), mas tenha ciência de que sabe que o valor não pode ser nulo, ou ele falhará. - -```typescript -// remove anulabilidade -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Para o caso de anulabilidade, é bom dar uma olhada no [recurso de verificação de anulabilidade](https://www.assemblyscript.org/basics.html#nullability-checks), pois ele deixará o seu código mais limpinho 🙂 - -Também adicionamos alguns métodos estáticos em alguns tipos para facilitar o casting, sendo: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Checagem de anulabilidade com acesso à propriedade - -Para usar a [checagem de anulabilidade](https://www.assemblyscript.org/basics.html#nullability-checks), dá para usar declarações `if` ou o operador ternário (`?` e `:`) assim: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// ou - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Mas isto só funciona ao fazer o ternário `if` / numa variável, e não num acesso de propriedade, assim: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? 
container.data : 'else' // doesn't compile -``` - -O que retorna este erro: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Para consertar este problema, vale criar uma variável para aquele acesso à propriedade, para que o compilador faça a magia da checagem de anulabilidade: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### Sobrecarga de operador com acesso à propriedade - -Se tentar somar (por exemplo) um tipo anulável (de um acesso de propriedade) com um não-anulável, em vez de soltar um aviso de erro de tempo de compilação a dizer que um dos valores é anulável, o compilador AssemblyScript só compilará em silêncio, o que abre chances para o código quebrar em meio ao tempo de execução. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // dá o erro de tempo de complação sobre anulabilidade - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // não dá erros de tempo de compilação como deveria -``` - -Nós abrimos um problema no compilador AssemblyScript para isto, mas por enquanto, se fizer estes tipos de operações nos seus mapeamentos de subgraph, vale mudá-las para fazer uma checagem de anulação antes delas. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // agora o `n` é garantido a ser um BigInt -``` - -### Inicialização de valor - -Se tiver algum código como este: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Ele fará a compilação, mas quebrará no tempo de execução porque o valor não foi inicializado. Tenha certeza de que o seu subgraph inicializou os seus valores, como assim: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -E também se tiver propriedades anuláveis numa entidade GraphQL, como assim: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -E tiver código parecido com este: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Inicialize o valor `total.amount`, porque se tentar acessar como na última linha para a soma, ele irá travar. Então — ou inicializas primeiro: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Ou pode simplesmente mudar o seu schema GraphQL para que não use um tipo anulável para esta propriedade, e o inicialize como zero no passo `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // já inicializa propriedades não-anuláveis -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Iniciação de propriedade de classe - -Se exportar quaisquer classes com propriedades que sejam outras classes (declaradas por você ou pela biblioteca padrão) como esta: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -O compilador dará em erro, porque precisa adicionar um iniciador às propriedades que são classes, ou adicionar o operador `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// ou - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// ou - -export class Something { - value!: Thing -} -``` - -### Iniciação de arranjo - -A classe `Array` (arranjo) ainda aceita um número para iniciar o comprimento da lista, mas tome cuidado — porque operações como `.push` aumentarão o tamanho em vez de adicionar ao começo, por exemplo: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -Dependendo dos tipos que usa, por ex., anuláveis, e como os acessa, pode encontrar um erro de tempo de execução como este: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Para empurrar no começo, inicialize o `Array` com o tamanho zero, assim: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Ou pode mudá-lo através do index: - 
-```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### Schema GraphQL - -Isto não é uma mudança direta no AssemblyScript, mas pode ser que precise atualizar o seu arquivo `schema.graphql`. - -Agora não há mais como definir campos nos seus tipos que são Listas Não Anuláveis. Se tiver um schema como este: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Adicione um `!` ao membro do tipo de Lista, como: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -Isto mudou por diferenças de anulabilidade entre versões do AssemblyScript, e tem relação ao arquivo `src/generated/schema.ts` (caminho padrão, talvez tenha mudado). - -### Outras informações - -- Alinhados `Map#set` e `Set#add`, com retorno de `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Os arranjos não herdam mais do ArrayBufferView, mas agora são distintos ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes inicializadas de literais de objeto não podem mais definir um construtor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- O resultado de uma operação binária, se ambos os operandos forem inteiros, `**` agora é o inteiro denominador comum. 
Antes, o resultado era um float, como se chamasse o `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coagir o `NaN` ao `false` ao converter em `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Ao mudar um valor inteiro pequeno do tipo `i8`/`u8` ou `i16`/`u16`, apenas os 3 respectivamente 4 bits menos significantes do valor RHS afetarão o resultado, análogo ao resultado de um `i32.shl` só a ser afetado pelos 5 bits menos significantes do valor RHS. Por exemplo: `someI8 << 8` antes produzia o valor `0`, mas agora produz o `somel8` por mascarar o RHS como `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Consertado um erro de comparações relacionais de string quando os tamanhos diferem ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 7b94db58a11d..000000000000 --- a/website/src/pages/pt/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,540 +0,0 @@ ---- -title: Guia de migração de Validações GraphQL ---- - -Em breve, o `graph-node` apoiará a cobertura total da [especificação de Validações GraphQL](https://spec.graphql.org/June2018/#sec-Validation). - -As versões anteriores do `graph-node` não apoiavam todas as validações, e forneciam respostas mais suaves — então, em casos de ambiguidade, o `graph-node` ignorava componentes de operação inválidos do GraphQL. - -O apoio a Validações do GraphQL é o pilar para os futuros novos recursos e o desempenho em escala da Graph Network. - -Ele também garantirá o determinismo de respostas de consultas, um requisito importante na Graph Network. 
- -**Ativar as Validações GraphQL quebrará alguns queries existentes** enviadas à API do The Graph. - -Para cumprir tais validações, por favor siga o guia de migração. - -> ⚠️ Se não migrar os seus queries antes de ativar as validações, eles retornarão erros e possivelmente quebrarão os seus frontends/clientes. - -## Guia de migração - -Pode usar a ferramenta de migração em CLI para encontrar e consertar quaisquer problemas nas suas operações no GraphQL. De outra forma, pode atualizar o endpoint do seu cliente GraphQL para usar o endpoint `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Testar os seus queries perante este endpoint ajudará-lhe a encontrar os problemas neles presentes. - -> Nem todos os Subgraphs precisam ser migrados; se usar o [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) ou o [Gerador de Código GraphQL](https://the-guild.dev/graphql/codegen), eles já garantirão que os seus queries sejam válidos. - -## Ferramenta CLI de migração - -**A maioria dos erros de operação em GraphQL podem ser encontrados na sua base de código com antecedência.** - -Portanto, provemos uma experiência suave para validar as suas operações GraphQL durante a programação ou em CI. - -O [`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) é uma ferramenta CLI (interface de linha de comando) simples que ajuda a validar operações GraphQL perante um schema. - -### **Primeiros passos** - -Pode executar a ferramenta da seguinte forma: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notas:** - -- Configure ou substitua $GITHUB_USER e $SUBGRAPH_NAME com os valores adequados. Por exemplo: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- A URL de prévia do schema (https://api-next.thegraph.com/) fornecida tem um rate-limit alto e será aposentada quando todos os utilizadores migrarem à versão nova. 
**Não a use na produção.** -- As operações são identificadas em arquivos com as seguintes extensões: [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### Output em CLI - -A ferramenta de CLI `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` mostrará quaisquer erros de operação em GraphQL na seguinte forma: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -Para cada erro, serão exibidos uma descrição, caminho e posição de arquivo, e um atalho para um exemplo de solução (veja a secção seguinte). - -## Execute os seus queries locais contra o schema de prévia - -Nós fornecemos um endpoint `https://api-next.thegraph.com/` que executa uma versão `graph-node` com as validações ativadas. - -Para testar queries, é só enviá-las para: - -- `https://api-next.thegraph.com/subgraphs/id/` - -ou - -- `https://api-next.thegraph.com/subgraphs/name//` - -Para trabalhar com consultas marcadas com erros de validação, pode usar a sua ferramenta de query no GraphQL favorita, como Altair ou [GraphiQL](https://cloud.hasura.io/public/graphiql), e testar o seu query. Estas ferramentas também marcarão tais erros na sua interface, antes mesmo de executá-la. - -## Como resolver problemas - -Abaixo, encontrará todos os erros de validação GraphQL que podem ocorrer nas suas operações em GraphQL. - -### Variáveis, operações, fragments, ou argumentos devem ser originais - -Nós aplicamos regras para garantir que uma operação inclua um conjunto único de variáveis, operações, fragmentos e argumentos. - -Uma operação GraphQL só é válida se não conter nenhuma ambiguidade. - -Para alcançar isto, devemos garantir que alguns componentes na sua operação GraphQL sejam únicos e originais. 
- -Veja algumas operações inválidas que violam estas regras: - -**Nome duplicado de Query (#UniqueOperationNamesRule)** - -```graphql -# A seguinte operação violou a regra UniqueOperationName -# já que temos uma operação única com 2 queries -# com o mesmo nome -query myData { - id -} - -query myData { - name -} -``` - -_Solução:_ - -```graphql -query myData { - id -} - -query myData2 { - # renomeie o segundo query - name -} -``` - -**Nome de Fragment duplicado (#UniqueFragmentNamesRule)** - -```graphql -# A seguinte operação violou a regra de -# UniqueFragmentName. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solução:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # dê um nome único ao fragmento - metadata -} - -fragment MyFieldsName { # dê um nome único ao fragmento - name -} -``` - -**Nome de variável duplicado (#UniqueVariableNamesRule)** - -```graphql -# A seguinte operação viola a UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solução:_ - -```graphql -query myData($id: String) { - # mantenha a variável relevante (here: `$id: String`) - id - ...MyFields -} -``` - -**Nome de argumento duplicado (#UniqueArgument)** - -```graphql -# A seguinte operação violou a UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solução:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Query anónimo duplicado (#LoneAnonymousOperationRule)** - -Aliás, usar duas operações anónimas violará a regra `LoneAnonymousOperation` devido a um conflito na estrutura das respostas: - -```graphql -# Isto falhará se executado junto com uma -# operação única com os dois queries a seguir: -query { - someField -} - -query { - otherField -} -``` - -_Solução:_ - -```graphql -query { - someField - otherField -} -``` - -Ou nomeie os dois queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Campos sobrepostos - -Uma seleção GraphQL só é considerada válida se ela resolver corretamente o conjunto eventual de resultados. - -Se um conjunto de seleção, ou um campo, em específico criar ambiguidades, seja pelo campo selecionado ou pelos argumentos usados, o serviço GraphQL não conseguirá validar a operação. - -Aqui estão alguns exemplos de operações inválidas que violam esta regra: - -**Apelidos de campos em conflito (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Dar apelidos a campos pode causar conflitos, -# seja com outros apelidos ou outros campos que -# existam no schema GraphQL. -query { - dogs { - name: nickname - name - } -} -``` - -_Solução:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # apelide o campo `name` original - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** (Campos em conflito com argumentos) - -```graphql -# Argumentos diferentes podem levar a dados diferentes, -# então não podemos presumir que os campos serão os mesmos. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solução:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Aliás, em casos de uso mais complexos, é possível violar esta regra ao usar dois fragmentos que possam causar um conflito no conjunto esperado afinal: - -```graphql -query { - # Eventualmente, temos duas definições "x", - # a apontar para campos diferentes! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Além disso, diretivas client-side no GraphQL como `@skip` e `@include` podem levar a ambiguidades, por exemplo: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Leia mais sobre o algoritmo aqui.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Variáveis ou Fragments Não Usados - -Uma operação GraphQL só é considerada válida se todos os componentes definidos pela operação (variáveis, fragments) forem usados. - -Veja alguns exemplos de operações GraphQL que violam estas regras: - -**Variável sem uso** (#NoUnusedVariablesRule) - -```graphql -# Inválida, porque o $someVar nunca é usado. -query something($someVar: String) { - someData -} -``` - -_Solução:_ - -```graphql -query something { - someData -} -``` - -**Fragment sem uso** (#NoUnusedFragmentsRule) - -```graphql -# Inválida, porque o fragmento AllFields nunca é usado. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solução:_ - -```graphql -# Inválida, porque o fragmento AllFields nunca é usado. 
-query something { - someData -} - -# retire o fragmento `AllFields` -``` - -### Conjunto de seleção inválido ou em falta (#ScalarLeafsRule) - -Aliás, uma seleção de campo GraphQL só é válida se o seguinte for validado: - -- Um campo de objeto deve ter um conjunto de seleção especificado. -- Um campo de margem (scalar, enum) não deve ter um conjunto de seleção especificado. - -Aqui estão alguns exemplos de violações destas regras com o seguinte Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Conjunto de seleções inválido** - -```graphql -query { - user { - id { # Inválido por que "id" é do tipo ID e não tem subcampos - - } - } -} -``` - -_Solução:_ - -```graphql -query { - user { - id - } -} -``` - -**Conjunto de seleções em falta** - -```graphql -query { - user { - id - image # `image` exige um conjunto de seleção para subcampos! - } -} -``` - -_Solução:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Valores de Arguments incorretos (#VariablesInAllowedPositionRule) - -Operações GraphQL que passam valores em código rígido para argumentos devem ser válidas, com base no valor definido no schema. - -Aqui estão alguns exemplos de operações inválidas que violam estas regras: - -```graphql -query purposes { - # Se o "name" for definido como "String" no schema, - # esta consulta falhará durante a validação. - purpose(name: 1) { - id - } -} - -# Isto também pode acontecer quando uma variável incorreta é definida: - -query purposes($name: Int!) { - # Se o "name" for definido como `String` no schema, - # esta consulta falhará durante a validação, porque a - # variável usada é do tipo `Int` - purpose(name: $name) { - id - } -} -``` - -### Tipo, Varíavel, Fragment, ou Diretiva desconhecido (#UnknownX) - -A API do GraphQL levantará um erro se houver uso de qualquer tipo, variável, fragmento ou diretiva desconhecidos. 
- -Estas referências desconhecidas devem ser consertadas: - -- renomeie se for um erro gramatical -- caso contrário, remova - -### Fragment: invalid spread or definition - -(Fragment: espalhamento ou definição inválidos) - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** (Espalhamento de fragment inválido) - -Um Fragment não pode ser espalhado em um tipo não aplicável. - -Por exemplo, não podemos aplicar um fragmento `Cat` no tipo `Dog`: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Definição de Fragment inválida (#FragmentsOnCompositeTypesRule)** - -Todos os Fragments devem ser definidos sobre um tipo composto (com `on ...`), ou seja: objeto, interface ou união. - -Os seguintes exemplos são inválidos, já que definir fragments em scalars é inválido. - -```graphql -fragment fragOnScalar on Int { - # não podemos definir um fragment sobre um scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` não é um subtipo de `Dog` - somethingElse - } -} -``` - -### Uso de Diretivas - -**Directive cannot be used at this location (#KnownDirectivesRule)** (A diretiva não pode ser usada neste local) - -Apenas diretivas GraphQL (`@...`) apoiadas pela API do The Graph podem ser usadas. - -Aqui está um exemplo com as diretivas apoiadas pelo GraphQL: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Nota: `@stream`, `@live`, e `@defer` não têm apoio._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** (A diretiva só pode ser usada neste local uma vez) - -As diretivas apoiadas pelo The Graph só podem ser usadas uma vez por local. 
- -O seguinte é inválido (e redundante): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/pt/resources/subgraph-studio-faq.mdx b/website/src/pages/pt/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/pt/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. 
+ +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. diff --git a/website/src/pages/pt/subgraphs/_meta-titles.json b/website/src/pages/pt/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/pt/subgraphs/_meta-titles.json +++ b/website/src/pages/pt/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/pt/subgraphs/_meta.js b/website/src/pages/pt/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/pt/subgraphs/_meta.js +++ b/website/src/pages/pt/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/pt/subgraphs/best-practices/_meta.js b/website/src/pages/pt/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/pt/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/pt/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best 
Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional; however, it is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/pt/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/pt/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pt/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/best-practices/pruning.mdx b/website/src/pages/pt/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/best-practices/timeseries.mdx b/website/src/pages/pt/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/pt/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/_meta.js b/website/src/pages/pt/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/pt/subgraphs/cookbook/_meta.js +++ b/website/src/pages/pt/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index fd40eb79c13c..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Melhores Práticas de Subgraph Parte 4 - Como Melhorar a Velocidade da Indexação ao Evitar eth_calls ---- - -## TLDR - -`eth_calls` são chamadas feitas de um subgraph a um node no Ethereum. Estas chamadas levam um bom tempo para retornar dados, o que retarda a indexação. Se possível, construa contratos inteligentes para emitir todos os dados necessários, para que não seja necessário usar `eth_calls`. - -## Por que Evitar `eth_calls` É uma Boa Prática - -Subgraphs são otimizados para indexar dados de eventos emitidos de contratos inteligentes. 
Um subgraph também pode indexar os dados que vêm de uma `eth_call`, mas isto pode atrasar muito a indexação de um subgraph, já que `eth_calls` exigem a realização de chamadas externas para contratos inteligentes. A capacidade de respostas destas chamadas depende não apenas do subgraph, mas também da conectividade e das respostas do node do Ethereum a ser consultado. Ao minimizar ou eliminar `eth_calls` nos nossos subgraphs, podemos melhorar muito a nossa velocidade de indexação. - -### Como É Um `eth_call`? - -`eth_calls` tendem a ser necessárias quando os dados requeridos por um subgraph não estão disponíveis via eventos emitidos. Por exemplo, vamos supor que um subgraph precisa identificar se tokens ERC20 são parte de um pool específico, mas o contrato só emite um evento `Transfer` básico e não emite um evento que contém os dados que precisamos: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suponhamos que a filiação de pool dos tokens seja determinada por um variável de estado chamado `getPoolInfo`. Neste caso, precisaríamos usar uma `eth_call` para consultar estes dados: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Atrele a instância de contrato ERC20 ao endereço dado: - let instance = ERC20.bind(event.address) - - // Retire a informação do pool via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -Isto é funcional, mas não ideal, já que ele atrasa a indexação do nosso subgraph. 
- -## Como Eliminar `eth_calls` - -Idealmente, o contrato inteligente deve ser atualizado para emitir todos os dados necessários dentro de eventos. Por exemplo, modificar o contrato inteligente para incluir informações de pools no evento pode eliminar a necessidade de `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -Com esta atualização, o subgraph pode indexar directamente os dados exigidos sem chamadas externas: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -Assim, o desempenho melhora muito por eliminar a necessidade de `eth_calls`. - -## Como Otimizar `eth_calls` - -Se não for possível modificar o contrato inteligente e se `eth_calls` forem necessários, leia “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” para aprender várias estratégias sobre como otimizar `eth_calls`. - -## Como Reduzir o Overhead de Runtime de `eth_calls` - -Para `eth_calls` que não podem ser eliminadas, o overhead de runtime que elas introduzem pode ser minimizado ao declará-las no manifest. Quando o`graph-node` processa um bloco, ele realiza todas as `eth_calls` em paralelo antes da execução dos handlers. Chamadas não declaradas são executadas em sequência quando os handlers são executados. 
A melhoria do runtime vem da execução das chamadas em paralelo, e não em sequência - isto ajuda a reduzir o tempo total gasto em chamadas, mas não o elimina por completo. - -Atualmente, `eth_calls` só podem ser declaradas para handlers de evento. No manifest, escreva - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -A porção destacada em amarelo é a declaração de chamada. A parte antes dos dois pontos é apenas um rótulo de texto usado só para mensagens de erro. A parte após os dois pontos tem a forma `Contract[address].function(params)`. Valores permissíveis para address e params são `event.address` e `event.params.`. - -O próprio handler acessa o resultado desta `eth_call` exatamente como na secção anterior ao atrelar ao contrato e fazer a chamada. o graph-node coloca em cache os resultados de `eth_calls` na memória e a chamada do handler terirará o resultado disto no cache de memória em vez de fazer uma chamada de RPC real. - -Nota: `eth_calls` declaradas só podem ser feitas em subgraphs com specVersion maior que 1.2.0. - -## Conclusão - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 21332819995b..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Boas Práticas de Subgraph 2 - Melhorar a Indexação e a Capacidade de Resposta de Queries com @derivedFrom ---- - -## Resumo - -O desempenho de um subgraph pode ser muito atrasado por arranjos no seu schema, já que esses podem crescer além dos milhares de entradas. Se possível, a diretiva `@derivedFrom` deve ser usada ao usar arranjos, já que ela impede a formação de grandes arranjos, simplifica handlers e reduz o tamanho de entidades individuais, o que melhora muito a velocidade da indexação e o desempenho dos queries. - -## Como Usar a Diretiva `@derivedFrom` - -Você só precisa adicionar uma diretiva `@derivedFrom` após o seu arranjo no seu schema. Assim: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -o `@derivedFrom` cria relações eficientes de um-para-muitos, o que permite que uma entidade se associe dinamicamente com muitas entidades relacionadas com base em um campo na entidade relacionada. Esta abordagem faz com que ambos os lados do relacionamento não precisem armazenar dados duplicados e aumenta a eficácia do subgraph. - -### Exemplo de Caso de Uso para `@derivedFrom` - -Um exemplo de um arranjo que cresce dinamicamente é uma plataforma de blogs onde um "Post" pode ter vários "Comments" (comentários). - -Vamos começar com as nossas duas entidades, `Post` e `Comment` - -Sem otimização, seria possível implementá-la assim com um arranjo: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! 
-} -``` - -Arranjos como este, efetivamente, armazenarão dados extras de Comments no lado do Post no relacionamento. - -Aqui está uma versão otimizada que usa o `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Ao adicionar a diretiva `@derivedFrom`, este schema só armazenará os "Comentários" no lado "Comments" do relacionamento, e não no lado "Post". Os arranjos são armazenados em fileiras individuais, o que os faz crescer significativamente. Se o seu crescimento não for contido, isto pode permitir que o tamanho fique excessivamente grande. - -Isto não só aumenta a eficiência do nosso subgraph, mas também desbloqueia três características: - -1. Podemos fazer um query sobre o `Post` e ver todos os seus comentários. - -2. Podemos fazer uma pesquisa reversa e um query sobre qualquer `Comment`, para ver de qual post ele vem. - -3. Podemos usar [Carregadores de Campos Derivados](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) para desbloquear a habilidade de acessar e manipular diretamente dados de relacionamentos virtuais nos nossos mapeamentos de subgraph. - -## Conclusão - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index b09d54c13373..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Visão geral - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. 
**Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. 
- - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. 
- - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. 
For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## Conclusão - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Outros Recursos - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index 49802e0ea200..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Boas Práticas de Subgraph 3 - Como Melhorar o Desempenho da Indexação e de Queries com Entidades Imutáveis e Bytes como IDs ---- - -## TLDR - -Usar Entidades Imutáveis e Bytes como IDs no nosso arquivo `schema.graphql` [acelera muito](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) a indexação e o desempenho das queries. - -## Entidades Imutáveis - -Para fazer uma entidade imutável, basta adicionar `(immutable: true)` a uma entidade. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -Ao tornar a entidade `Transfer` imutável, o graph-node pode processá-la com mais eficácia, o que melhora as velocidades de indexação e a capacidade de resposta das queries. - -Estruturas de Entidades Imutáveis não mudarão no futuro. Uma entidade ideal para se tornar Imutável seria uma que grava diretamente dados de eventos on-chain; por exemplo, um evento `Transfer` gravado como uma entidade `Transfer`. - -### De dentro da casca - -Entidades mutáveis tem um `block range` (alcance de bloco) que indica a sua validade. Atualizar estas entidades exige que o graph-node ajuste o alcance de bloco de versões anteriores, o que aumenta a carga de trabalho do banco de dados. Queries também precisam de filtragem para encontrar apenas entidades vivas. Entidades imutáveis são mais rápidas por serem todas vivas, e como não mudam, nenhuma verificação ou atualização é necessária durante a escrita, e nenhuma filtragem é necessária durante queries. 
- -### Quando não usar Entidades Imutáveis - -Se tiver um campo como `status` que precise ser gradualmente modificado, então esta entidade não deve ser imutável. Fora isto, é recomendado usar entidades imutáveis sempre que possível. - -## Bytes como IDs - -Toda entidade requer uma ID. No exemplo anterior, vemos que a ID já é do tipo Bytes. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -Enquanto outros tipos de IDs são possíveis, como String e Int8, recomendamos usar o tipo Bytes para todas as IDs porque strings de caracteres ocupam o dobro do espaço ocupado por strings de Byte para armazenar dados, e comparações de strings de caracteres em UTF-8 devem levar em conta o local, que é muito mais caro que a comparação em byte usada para comparar strings em Byte. - -### Razões para Não Usar Bytes como IDs - -1. Se IDs de entidade devem ser legíveis para humanos, como IDs numéricas automaticamente incrementadas ou strings legíveis, então Bytes como IDs não devem ser usados. -2. Em caso de integração dos dados de um subgraph com outro modelo de dados que não usa Bytes como IDs, então Bytes como IDs não devem ser usados. -3. Melhorias no desempenho de indexação e queries não são desejáveis. - -### Concatenação com Bytes como IDs - -É comum em vários subgraphs usar a concatenação de strings para combinar duas propriedades de um evento em uma ID única, como o uso de `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. Mas como isto retorna um string, isto impede muito o desempenho da indexação e queries de subgraphs. - -Em vez disto, devemos usar o método `concatI32()` para concatenar propriedades de evento. Esta estratégia resulta numa ID `Bytes` que tem um desempenho muito melhor. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Organização com Bytes como IDs - -A organização com Bytes como IDs não é o melhor recurso, como visto neste exemplo de query e resposta. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Resposta de query: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -As IDs são retornadas como hex. - -Para melhorar a organização, devemos criar outro campo na entidade que seja um BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -Isto otimizará sequencialmente a organização. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Resposta de query: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusão - -É comprovado que usar Entidades Imutáveis e Bytes como IDs aumenta muito a eficiência de subgraphs. Especificamente, segundo testes, houve um aumento de até 28% no desempenho de queries e uma aceleração de até 48% em velocidades de indexação. 
- -Leia mais sobre o uso de Entidades Imutáveis e Bytes como IDs nesta publicação por David Lutterkort, Engenheiro de Software na Edge & Node: [Duas Melhorias Simples no Desempenho de Subgraphs](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/pruning.mdx b/website/src/pages/pt/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index 5a3d1ac12a50..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Boas Práticas de Subgraph 1 - Acelerar Queries com Pruning ---- - -## TLDR - -O [pruning](/developing/creating-a-subgraph/#prune) retira entidades de arquivo do banco de dados de um subgraph até um bloco especificado; e retirar entidades não usadas do banco de dados de um subgraph tende a melhorar muito o desempenho de queries de um subgraph. Usar o `indexerHints` é uma maneira fácil de fazer o pruning de um subgraph. - -## Como Fazer Pruning de um Subgraph com `indexerHints` - -Adicione uma secção chamada `indexerHints` ao manifest. - -O `indexerHints` tem três opções de `prune`: - -- `prune: auto`: Guarda o histórico mínimo necessário, conforme configurado pelo Indexador, para otimizar o desempenho dos queries. 
Esta é a configuração geralmente recomendada e é padrão para todos os subgraphs criados pela `graph-cli` >= 0.66.0. -- `prune: `: Determina um limite personalizado no número de blocos históricos a serem retidos. -- `prune: never`: Nenhum pruning de dados históricos; guarda o histórico completo e é o padrão caso não haja uma secção `indexerHints`. O `prune: never` deve ser selecionado caso queira [Queries de Viagem no Tempo](/subgraphs/querying/graphql-api/#time-travel-queries). - -Podemos adicionar `indexerHints` aos nossos subgraphs ao atualizar o nosso `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Considerações Importantes - -- Se quiser fazer [Queries de Viagem no Tempo](/subgraphs/querying/graphql-api/#time-travel-queries) junto com pruning, o pruning deve ser realizado com precisão para manter a funcionalidade das Queries de Viagem no Tempo. Portanto, não recomendamos usar `indexerHints: prune: auto` com Queries de Viagem no Tempo. Em vez disto, use `indexerHints: prune: ` para fazer um pruning preciso até uma altura de bloco que preserve os dados históricos requeridos por Queries de Viagem no Tempo, ou use o `prune: never` para manter todos os dados. - -- Não é possível criar [enxertos](/subgraphs/cookbook/grafting/) a uma altura de bloco que já tenha passado por pruning. Se enxertos forem realizados com frequência e o pruning for desejado, recomendamos usar `indexerHints: prune: ` que guardarão com precisão um número determinado de blocos (por ex., o suficiente para seis meses). - -## Conclusão - -O pruning com `indexerHints` é uma boa prática para o desenvolvimento de subgraphs que oferece melhorias significativas no desempenho de queries. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. 
[Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx b/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 9c2a8632c5b6..000000000000 --- a/website/src/pages/pt/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Visão geral - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. 
Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### Considerações Importantes - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Exemplo: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Exemplo: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. 
- -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. - -Exemplo: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Exemplo: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusão - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/pt/subgraphs/developing/deploying/_meta.js b/website/src/pages/pt/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/pt/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/pt/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 512502e6d5d4..000000000000 --- a/website/src/pages/pt/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Perguntas Frequentes do Subgraph Studio ---- - -## 1. O que é o Subgraph Studio? - -O [Subgraph Studio](https://thegraph.com/studio/) é um dapp para criar, gerir e editar subgraphs e chaves de API. - -## 2. Como criar uma Chave de API? - -Para criar uma API, navegue até o Subgraph Studio e conecte a sua carteira. Logo, clique na aba API Keys (Chaves de API); lá poderá criar uma chave API. - -## 3. Posso criar várias Chaves de API? - -Sim! Pode criar mais de uma Chave de API para usar em projetos diferentes. Confira [aqui](https://thegraph.com/studio/apikeys/). - -## 4. Como restringir um domínio para uma Chave de API? 
- -Após criar uma Chave de API, na seção de Segurança (Security), pode definir os domínios que podem consultar uma Chave de API específica. - -## 5. Posso transferir meu subgraph para outro dono? - -Sim. Subgraphs editados no Arbitrum One podem ser transferidos para uma nova carteira ou uma Multisig. Para isto, clique nos três pontos próximos ao botão 'Publish' (Publicar) na página de detalhes do subgraph e selecione 'Transfer ownership' (Transferir titularidade). - -Note que após a transferência, não poderá mais ver ou alterar o subgraph no Studio. - -## 6. Se eu não for o programador do subgraph que quero usar, como encontro URLs de query para subgraphs? - -A URL de query para cada subgraph está na seção Subgraph Details (Detalhes de Subgraph) do The Graph Explorer. O botão "Query" (Consulta) te levará a um painel com a URL de query do subgraph de seu interesse. Você pode então substituir o espaço `` com a chave de API que quer usar no Subgraph Studio. - -Lembre-se que, mesmo se construir um subgraph por conta própria, ainda poderá criar uma chave de API e consultar qualquer subgraph publicado na rede. Estes queries através da nova chave API são pagos, como quaisquer outros na rede. 
diff --git a/website/src/pages/pt/subgraphs/developing/publishing/_meta.js b/website/src/pages/pt/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/pt/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/pt/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/pt/subgraphs/querying/_meta.js b/website/src/pages/pt/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/pt/subgraphs/querying/_meta.js +++ b/website/src/pages/pt/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/ro/resources/_meta-titles.json b/website/src/pages/ro/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/ro/resources/_meta-titles.json +++ b/website/src/pages/ro/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/ro/resources/_meta.js b/website/src/pages/ro/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/ro/resources/_meta.js +++ b/website/src/pages/ro/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/ro/resources/release-notes/_meta.js 
b/website/src/pages/ro/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/ro/resources/release-notes/_meta.js rename to website/src/pages/ro/resources/migration-guides/_meta.js diff --git a/website/src/pages/ro/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/ro/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/ro/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. + +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
+ +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` 
([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. 
+ +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. + if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. 
+
+### Casting
+
+The common way to do casting before was to just use the `as` keyword, like this:
+
+```typescript
+let byteArray = new ByteArray(10)
+let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray
+```
+
+However this only works in two scenarios:
+
+- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`);
+- Upcasting on class inheritance (subclass → superclass)
+
+Examples:
+
+```typescript
+// primitive casting
+let a: usize = 10
+let b: isize = 5
+let c: usize = a + (b as usize)
+```
+
+```typescript
+// upcasting on class inheritance
+class Bytes extends Uint8Array {}
+
+let bytes = new Bytes(2)
+// <Uint8Array>bytes // same as: bytes as Uint8Array
+```
+
+There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**:
+
+- Downcasting on class inheritance (superclass → subclass)
+- Between two types that share a superclass
+
+```typescript
+// downcasting on class inheritance
+class Bytes extends Uint8Array {}
+
+let uint8Array = new Uint8Array(2)
+// <Bytes>uint8Array // breaks in runtime :(
+```
+
+```typescript
+// between two types that share a superclass
+class Bytes extends Uint8Array {}
+class ByteArray extends Uint8Array {}
+
+let bytes = new Bytes(2)
+// <ByteArray>bytes // breaks in runtime :(
+```
+
+For those cases, you can use the `changetype<T>` function:
+
+```typescript
+// downcasting on class inheritance
+class Bytes extends Uint8Array {}
+
+let uint8Array = new Uint8Array(2)
+changetype<Bytes>(uint8Array) // works :)
+```
+
+```typescript
+// between two types that share a superclass
+class Bytes extends Uint8Array {}
+class ByteArray extends Uint8Array {}
+
+let bytes = new Bytes(2)
+changetype<ByteArray>(bytes) // works :)
+```
+
+If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break.
+ +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
+
+```typescript
+let wrapper = new Wrapper(y)
+
+if (!wrapper.n) {
+  wrapper.n = BigInt.fromI32(0)
+}
+
+wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt
+```
+
+### Value initialization
+
+If you have any code like this:
+
+```typescript
+var value: Type // null
+value.x = 10
+value.y = 'content'
+```
+
+It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this:
+
+```typescript
+var value = new Type() // initialized
+value.x = 10
+value.y = 'content'
+```
+
+Also if you have nullable properties in a GraphQL entity, like this:
+
+```graphql
+type Total @entity {
+  id: Bytes!
+  amount: BigInt
+}
+```
+
+And you have code similar to this:
+
+```typescript
+let total = Total.load('latest')
+
+if (total === null) {
+  total = new Total('latest')
+}
+
+total.amount = total.amount + BigInt.fromI32(1)
+```
+
+You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first:
+
+```typescript
+let total = Total.load('latest')
+
+if (total === null) {
+  total = new Total('latest')
+  total.amount = BigInt.fromI32(0)
+}
+
+total.amount = total.amount + BigInt.fromI32(1)
+```
+
+Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉
+
+```graphql
+type Total @entity {
+  id: Bytes!
+  amount: BigInt!
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ro/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/ro/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/ro/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/ro/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ro/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 4d909e8970a8..000000000000 --- a/website/src/pages/ro/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). 
- -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## Migration guide - -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. - -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. - -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. 
- -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). - -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. - -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -or - -- `https://api-next.thegraph.com/subgraphs/name//` - -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. - -## How to solve issues - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
- -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. - -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. 
-- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### Fragment: invalid spread or definition - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. 
- -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. - -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/ro/resources/subgraph-studio-faq.mdx b/website/src/pages/ro/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/ro/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. 
There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries, like any other on the network.
diff --git a/website/src/pages/ro/subgraphs/_meta-titles.json b/website/src/pages/ro/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ro/subgraphs/_meta-titles.json +++ b/website/src/pages/ro/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ro/subgraphs/_meta.js b/website/src/pages/ro/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ro/subgraphs/_meta.js +++ b/website/src/pages/ro/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ro/subgraphs/best-practices/_meta.js b/website/src/pages/ro/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ro/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/ro/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating `eth_calls` in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```solidity +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`.
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/ro/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/ro/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ro/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/best-practices/pruning.mdx b/website/src/pages/ro/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: `: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/best-practices/timeseries.mdx b/website/src/pages/ro/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/ro/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/_meta.js b/website/src/pages/ro/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ro/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ro/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index a0bd3f4ab1c2..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Overview - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Additional Resources - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/pruning.mdx b/website/src/pages/ro/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 0168be53d7ed..000000000000 --- a/website/src/pages/ro/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Overview - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ro/subgraphs/developing/deploying/_meta.js b/website/src/pages/ro/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ro/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ro/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ro/subgraphs/developing/publishing/_meta.js b/website/src/pages/ro/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ro/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ro/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ro/subgraphs/querying/_meta.js b/website/src/pages/ro/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ro/subgraphs/querying/_meta.js +++ b/website/src/pages/ro/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 
'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/ru/resources/_meta-titles.json b/website/src/pages/ru/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/ru/resources/_meta-titles.json +++ b/website/src/pages/ru/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/ru/resources/_meta.js b/website/src/pages/ru/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/ru/resources/_meta.js +++ b/website/src/pages/ru/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/ru/resources/release-notes/_meta.js b/website/src/pages/ru/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/ru/resources/release-notes/_meta.js rename to website/src/pages/ru/resources/migration-guides/_meta.js diff --git a/website/src/pages/ru/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/ru/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/ru/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// <Uint8Array>bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// <Bytes>uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// <ByteArray>bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype<Bytes>(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype<ByteArray>(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.tokens = total.tokens + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ru/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/ru/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/ru/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index af444be7a6e3..000000000000 --- a/website/src/pages/ru/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Руководство по миграции AssemblyScript ---- - -До сих пор для субграфов использовалась одна из [первых версий AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Наконец, мы добавили поддержку [последней доступной версии](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 - -Это позволит разработчикам субграфов использовать более новые возможности языка AS и стандартной библиотеки. - -Это руководство применимо для всех, кто использует `graph-cli`/`graph-ts` версии ниже `0.22.0`. Если у Вас уже есть версия выше (или равная) этой, значит, Вы уже использовали версию `0.19.10` AssemblyScript 🙂 - -> Примечание. Начиная с `0.24.0`, `graph-node` может поддерживать обе версии, в зависимости от `apiVersion`, указанного в манифесте субграфа. - -## Особенности - -### Новый функционал - -- Теперь `TypedArray` можно создавать, используя `ArrayBuffer`6 с помощью [нового статического метода `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Новые функции стандартной библиотеки: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` и `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Добавлена поддержка x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Добавлен `StaticArray`, более эффективный вариант массива ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Добавлен `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Реализован аргумент `radix` для `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Добавлена поддержка разделителей в литералах с плавающей точкой ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Добавлена поддержка функций первого класса ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Добавление встроенных модулей: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Внедрение `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Добавлена поддержка литеральных строк шаблона ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Добавление `encodeURI(Component)` и `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Добавление `toString`, `toDateString` и `toTimeString` к `Date` ([v0.18.29](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Добавление `toUTCString` для `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Добавление встроенного типа `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Оптимизации - -- Функции `Math`, такие как `exp`, `exp2`, `log`, `log2` и `pow` были заменены более быстрыми вариантами ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Проведена небольшая оптимизация `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Кэширование большего количества обращений к полям в std Map и Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Оптимизация по двум степеням в `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Прочее - -- Тип литерала массива теперь можно определить по его содержимому ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Стандартная библиотека обновлена до версии Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Как выполнить обновление? - -1. Измените мэппинги `apiVersion` в `subgraph.yaml` на `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... 
- apiVersion: 0.0.6 - ... -``` - -2. Обновите используемый Вами `graph-cli` до `latest` версии, выполнив: - -```bash -# если он у Вас установлен глобально -npm install --global @graphprotocol/graph-cli@latest - -# или в Вашем субграфе, если он у Вас как зависимость dev -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Сделайте то же самое для `graph-ts`, но вместо глобальной установки сохраните его в своих основных зависимостях: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Следуйте остальной части руководства, чтобы исправить языковые изменения. -5. Снова запустите `codegen` и `deploy`. - -## Критические изменения - -### Обнуляемость - -В более старой версии AssemblyScript можно было создать такой код: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Однако в новой версии, поскольку значение обнуляемо, требуется проверка, например, такая: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -Или принудительно вот такая: - -```typescript -let maybeValue = load()! // прерывается во время выполнения, если значение равно null - -maybeValue.aMethod() -``` - -Если Вы не уверены, что выбрать, мы рекомендуем всегда использовать безопасную версию. Если значение не существует, Вы можете просто выполнить раннее выражение if с возвратом в обработчике субграфа. 
- -### Затенение переменных - -Раньше можно было сделать [затенение переменных](https://en.wikipedia.org/wiki/Variable_shadowing) и код, подобный этому, работал: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Однако теперь это больше невозможно, и компилятор возвращает эту ошибку: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Вам нужно будет переименовать дублированные переменные, если Вы используете затенение переменных. - -### Нулевые сравнения - -Выполняя обновление своего субграфа, иногда Вы можете получить такие ошибки: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Чтобы решить эту проблему, Вы можете просто изменить оператор `if` на что-то вроде этого: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Подобное относится к случаям, когда вместо == используется !=. 
- -### Кастинг - -Раньше для кастинга обычно использовалось ключевое слово `as`, например: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Однако это работает только в двух случаях: - -- Примитивный кастинг (между такими типами, как `u8`, `i32`, `bool`; например: `let b: isize = 10; b as usize`); -- Укрупнение по наследованию классов (subclass → superclass) - -Примеры: - -```typescript -// примитивный кастинг -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// укрупнение по наследованию классов -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // то же, что: bytes as Uint8Array -``` - -Есть два сценария, в которых Вы можете захотеть выполнить преобразование, но использовать `as`/`var` **небезопасно**: - -- Понижение уровня наследования классов (superclass → subclass) -- Между двумя типами, имеющими общий супер класс - -```typescript -// понижение уровня наследования классов -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // перерывы в работе :( -``` - -```typescript -// между двумя типами, имеющими общий суперкласс -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // перерывы в работе :( -``` - -В таких случаях можно использовать функцию `changetype`: - -```typescript -// понижение уровня наследования классов -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // работает :) -``` - -```typescript -// между двумя типами, имеющими общий суперкласс -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // работает :) -``` - -Если Вы просто хотите удалить значение NULL, Вы можете продолжать использовать оператор `as` (или `variable`), но помните, что значение не может быть нулевым, иначе оно сломается. 
- -```typescript -// удалить значение NULL -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // безопасно удалить значение NULL -} - -let newBalance = new AccountBalance(balanceId) -``` - -В случае обнуления мы рекомендуем Вам обратить внимание на [функцию проверки обнуления](https://www.assemblyscript.org/basics.html#nullability-checks), это сделает ваш код чище 🙂 - -Также мы добавили еще несколько статических методов в некоторые типы, чтобы облегчить кастинг: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Проверка нулевого значения с доступом к свойству - -Чтобы применить [функцию проверки на нулевое значение](https://www.assemblyscript.org/basics.html#nullability-checks), Вы можете использовать операторы `if` или тернарный оператор (`?` и `:`) следующим образом: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// или - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Однако это работает только тогда, когда Вы выполняете `if` / тернарную операцию для переменной, а не для доступа к свойству, например: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // не компилируется -``` - -В результате чего выдается ошибка: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Чтобы решить эту проблему, Вы можете создать переменную для доступа к этому свойству, чтобы компилятор мог выполнять проверку допустимости значений NULL: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // компилируется просто отлично :) -``` - -### Перегрузка оператора при доступе к свойствам - -Если Вы попытаетесь суммировать (например) тип, допускающий значение Null (из доступа к свойству), с типом, не допускающим значение Null, компилятор AssemblyScript вместо того, чтобы выдать предупреждение об ошибке компиляции, предупреждающую, что одно из значений допускает значение Null, просто компилируется молча, давая возможность сломать код во время выполнения. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // выдает ошибку времени компиляции о возможности обнуления - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // не выдает ошибок времени компиляции, как это должно быть -``` - -Мы открыли вопрос по этому поводу для компилятора AssemblyScript, но пока, если Вы выполняете подобные операции в своих мэппингах субграфов, Вам следует изменить их так, чтобы перед этим выполнялась проверка на нулевое значение. - -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // теперь `n` гарантированно будет BigInt -``` - -### Инициализация значения - -Если у Вас есть такой код: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Он будет скомпилирован, но сломается во время выполнения. 
Это происходит из-за того, что значение не было инициализировано, поэтому убедитесь, что Ваш субграф инициализировал свои значения, например так: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -Также, если у Вас есть свойства, допускающие значение NULL, в объекте GraphQL, например: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -И у Вас есть код, аналогичный этому: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Вам необходимо убедиться, что значение `total.amount` инициализировано, потому что, если Вы попытаетесь получить доступ к сумме, как в последней строке, произойдет сбой. Таким образом, Вы либо инициализируете его первым: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Или Вы можете просто изменить свою схему GraphQL, чтобы не использовать тип, допускающий значение NULL для этого свойства. Тогда мы инициализируем его нулем на этапе `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // уже инициализирует свойства, не допускающие значения NULL -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Инициализация свойств класса - -Если Вы экспортируете какие-либо классы со свойствами, которые являются другими классами (декларированными Вами или стандартной библиотекой), то это выглядит следующим образом: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Компилятор выдаст ошибку, потому что Вам нужно либо добавить инициализатор для свойств, являющихся классами, либо добавить оператор `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// или - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// или - -export class Something { - value!: Thing -} -``` - -### Инициализация массива - -Класс `Array` по-прежнему принимает число для инициализации длины списка, однако Вам следует соблюдать осторожность, поскольку такие операции, как `.push`, фактически увеличивают размер, а не добавляют его в начало, например: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -В зависимости от используемых типов, например, допускающих значение NULL, и способа доступа к ним, можно столкнуться с ошибкой времени выполнения, подобной этой: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -Для того чтобы фактически начать, Вы должны либо инициализировать `Array` нулевым 
размером, следующим образом: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Или Вы должны изменить его через индекс: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### Схема GraphQL - -Это не прямое изменение AssemblyScript, но Вам, возможно, придется обновить файл `schema.graphql`. - -Теперь Вы больше не можете определять поля в своих типах, которые являются списками, не допускающими значение NULL. Если у Вас такая схема: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Вам нужно добавить `!` к элементу типа List, например, так: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -Изменение произошло из-за различий в допустимости значений NULL между версиями AssemblyScript и связано с файлом `src/generated/schema.ts` (путь по умолчанию, возможно, Вы его изменили). - -### Прочее - -- `Map#set` и `Set#add` согласованы со спецификацией, произведён возврат к `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Массивы больше не наследуются от ArrayBufferView, а являются самостоятельными ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Классы, инициализируемые из объектных литералов, больше не могут определять конструктор ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Результатом бинарной операции `**` теперь является целое число с общим знаменателем, если оба операнда являются целыми числами. 
Раньше результатом было число с плавающей запятой, как при вызове `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Приведение `NaN` к `false` при преобразовании в `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- При сдвиге небольшого целочисленного значения типа `i8`/`u8` или `i16`/`u16`, на результат влияют только соответственно 3 или 4 младших разряда значения RHS, аналогично тому, как при сдвиге `i32.shl` на результат влияют только 5 младших разрядов значения RHS. Пример: `someI8 << 8` ранее выдавал значение `0`, но теперь выдает значение `someI8` благодаря маскировке RHS как `8 & 7 = 0` (3 бита) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Исправлена ошибка сравнения реляционных строк при разных размерах ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index b7cb792259b3..000000000000 --- a/website/src/pages/ru/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: Руководство по переходу на валидацию GraphQL ---- - -Вскоре `graph-node` будет поддерживать 100-процентное покрытие [спецификации GraphQL Validation] (https://spec.graphql.org/June2018/#sec-Validation). - -Предыдущие версии `graph-node` не поддерживали все валидации и предоставляли более обтекаемые ответы, поэтому в случаях неоднозначности `graph-node` игнорировал недопустимые компоненты операций GraphQL. - -Поддержка валидации GraphQL является основой для будущих новых функций и производительности в масштабе The Graph Network. - -Это также обеспечит детерминизм ответов на запросы, что является ключевым требованием в сети The Graph. 
- -**Включение валидации GraphQL нарушит работу некоторых существующих запросов**, отправленных в API The Graph. - -Чтобы выполнить эти валидации, следуйте руководству по миграции. - -> ⚠️ Если Вы не перенесете свои запросы до развертывания валидаций, они будут возвращать ошибки и, возможно, повредят ваши интерфейсы/клиенты. - -## Руководство по миграции - -Вы можете использовать инструмент миграции CLI, чтобы найти любые проблемы в операциях GraphQL и исправить их. В качестве альтернативы вы можете обновить конечную точку своего клиента GraphQL, чтобы использовать конечную точку `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Проверка запросов на этой конечной точке поможет Вам обнаружить проблемы в Ваших запросах. - -> Не все субграфы нужно будет переносить, если Вы используете [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) или [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), они уже гарантируют корректность Ваших запросов. - -## CLI-инструмент миграции - -**Большинство ошибок операций GraphQL можно обнаружить в Вашей кодовой базе заранее.** - -По этой причине мы обеспечиваем беспрепятственный процесс валидации Ваших операций GraphQL во время разработки или в CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) — это простой инструмент командной строки, который помогает проверять операции GraphQL по заданной схеме. - -### **Начало работы** - -Вы можете запустить инструмент следующим образом: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Примечания:** - -- Установите или замените $GITHUB_USER, $SUBGRAPH_NAME соответствующими значениями. 
Например: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- Предоставленный URL-адрес схемы предварительного просмотра (https://api-next.thegraph.com/) сильно ограничен по скорости и будет удален после того, как все пользователи перейдут на новую версию. **Не используйте его в рабочей среде.** -- Операции идентифицируются в файлах со следующими расширениями [`.graphql`,] (https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader) [`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### Вывод CLI - -Инструмент CLI `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` будет выводить любые ошибки операций GraphQL следующим образом: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -Для каждой ошибки Вы найдете описание, путь к файлу и его положение, а также ссылку на пример решения (см. следующий раздел). - -## Выполняйте локальные запросы по схеме предварительного просмотра - -Мы предоставляем конечную точку `https://api-next.thegraph.com/`, которая запускает версию `graph-node` с включенной валидацией. - -Вы можете опробовать запросы, отправив их по адресу: - -- `https://api-next.thegraph.com/subgraphs/id/` - -или - -- `https://api-next.thegraph.com/subgraphs/name//` - -Чтобы работать с запросами, которые были помечены как имеющие ошибки валидации, Вы можете использовать свой любимый инструмент запросов GraphQL, например Altair или [GraphiQL](https://cloud.hasura.io/public/graphiql) и попробовать свой запрос. Эти инструменты также будут отмечать ошибки в своем пользовательском интерфейсе еще до того, как Вы их запустите. - -## Как решить проблемы - -Ниже Вы найдете все ошибки валидации GraphQL, которые могут возникнуть в Ваших операциях GraphQL. 
- -### Переменные, операции, фрагменты или аргументы GraphQL должны быть уникальными - -Мы применили правила, гарантирующие, что операция включает уникальный набор переменных, операций, фрагментов и аргументов GraphQL. - -Операция GraphQL действительна только в том случае, если она не содержит какой-либо неоднозначности. - -Для этого нам нужно убедиться, что некоторые компоненты в Вашей операции GraphQL уникальны. - -Ниже приведен пример нескольких недопустимых операций, нарушающих эти правила: - -**Повторяющееся имя запроса (#UniqueOperationNamesRule)** - -```graphql -# Следующая операция нарушила UniqueOperationName -# правило, так как у нас одна операция с 2-мя запросами -# с тем же именем -query myData { - id -} - -query myData { - name -} -``` - -_Решение:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Повторяющееся имя фрагмента (#UniqueFragmentNamesRule)** - -```graphql -# Следующая операция нарушила UniqueFragmentName -# правило. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Решение:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # присвоить фрагменту уникальное имя - metadata -} - -fragment MyFieldsName { # присвоить фрагменту уникальное имя - name -} -``` - -**Повторяющееся имя переменной (#UniqueVariableNamesRule)** - -```graphql -# Следующая операция нарушает UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Решение:_ - -```graphql -query myData($id: String) { - # сохранить соответствующую переменную (здесь: `$id: String`) - id - ...MyFields -} -``` - -**Повторяющееся имя аргумента (#UniqueArgument)** - -```graphql -# Следующая операция нарушила UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Решение:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Повторяющийся анонимный запрос (#LoneAnonymousOperationRule)** - -Кроме того, использование двух анонимных операций нарушит правило LoneAnonymousOperation из-за конфликта в структуре ответа: - -```graphql -# Это приведет к ошибке, если выполнить их вместе в -# одной операции со следующими двумя запросами: -query { - someField -} - -query { - otherField -} -``` - -_Решение:_ - -```graphql -query { - someField - otherField -} -``` - -Или назовите эти два запроса: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Перекрывающиеся поля - -Набор выбора GraphQL считается допустимым, только если он правильно разрешает конечный набор результатов. - -Если конкретный набор элементов или поле создают неоднозначность либо из-за выбранного поля, либо из-за используемых аргументов, служба GraphQL не сможет валидировать операцию. - -Вот несколько примеров недопустимых операций, нарушающих это правило: - -**Псевдонимы конфликтующих полей (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Псевдонимы полей могут вызвать конфликты либо с -# другими псевдонимами или другими полями, существующими в -# схеме GraphQL. -query { - dogs { - name: nickname - name - } -} -``` - -_Решение:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Конфликтующие поля с аргументами (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Разные аргументы могут привести к разным данным, -# поэтому мы не можем предположить, что поля будут одинаковыми. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Решение:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Кроме того, в более сложных случаях использования Вы можете нарушить это правило, используя два фрагмента, которые могут вызвать конфликт в ожидаемом в конечном итоге наборе: - -```graphql -query { - # В конце концов, у нас есть два определения "x", указывающие - # на разные поля! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Кроме того, директивы GraphQL на стороне клиента, такие как `@skip` и `@include`, могут привести к неоднозначности, например: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Подробнее об алгоритме можно прочитать здесь.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Неиспользуемые переменные или фрагменты - -Операция GraphQL также считается действительной, только если используются все определенные в операции компоненты (переменные, фрагменты). - -Вот несколько примеров операций GraphQL, которые нарушают эти правила: - -**Неиспользуемая переменная** (#NoUnusedVariablesRule) - -```graphql -# Неверно, потому что $someVar никогда не используется. -query something($someVar: String) { - someData -} -``` - -_Решение:_ - -```graphql -query something { - someData -} -``` - -**Неиспользуемый фрагмент** (#NoUnusedFragmentsRule) - -```graphql -# Неверно, так как фрагмент AllFields никогда не используется. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Решение:_ - -```graphql -# Неверно, так как фрагмент AllFields никогда не используется. 
-query something { - someData -} - -# удалить фрагмент `AllFields` -``` - -### Недопустимый или отсутствующий набор элементов выбора (#ScalarLeafsRule) - -Кроме того, выбор поля GraphQL действителен только в том случае, если подтверждено следующее: - -- Поле объекта должно иметь заданный набор выбора. -- Краевое поле (скалярное, перечислимое) не должно иметь заданного набора выбора. - -Вот несколько примеров нарушения этих правил со следующей схемой: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Недопустимый набор выбора** - -```graphql -query { - user { - id { # Неверно, так как "id" имеет тип ID и не имеет подполей - - } - } -} -``` - -_Решение:_ - -```graphql -query { - user { - id - } -} -``` - -**Отсутствует набор выбора** - -```graphql -query { - user { - id - image # `image` требует набора выбора для подполей! - } -} -``` - -_Решение:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Неверные значения аргументов (#VariablesInAllowedPositionRule) - -Операции GraphQL, которые передают жестко запрограммированные значения в аргументы, должны быть допустимыми на основе значения, определенного в схеме. - -Вот несколько примеров недопустимых операций, нарушающих эти правила: - -```graphql -query purposes { - # Если в схеме "name" определено как "String", - # этот запрос не пройдёт валидацию. - purpose(name: 1) { - id - } -} - -# Это также может произойти, если определена неверная переменная: - -query purposes($name: Int!) { - # Если "name" определено в схеме как `String`, - # этот запрос не пройдёт валидацию, потому что - # используемая переменная имеет тип `Int` - purpose(name: $name) { - id - } -} -``` - -### Неизвестный тип, переменная, фрагмент или директива (#UnknownX) - -API GraphQL вызовет ошибку, если используется какой-либо неизвестный тип, переменная, фрагмент или директива. 
- -Эти неизвестные ссылки необходимо исправить: - -- переименуйте, если это опечатка -- в противном случае удалите - -### Фрагмент: недопустимый спред или определение - -**Неверный разворот фрагмента (#PossibleFragmentSpreadsRule)** - -Фрагмент не может быть распространен на неприменимый тип. - -Например, мы не можем применить фрагмент `Cat` к типу `Dog`: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Недопустимое определение фрагмента (#FragmentsOnCompositeTypesRule)** - -Все фрагменты должны быть определены (используя `on ...`) для составного типа, короче говоря: для объекта, интерфейса или объединения. - -Следующие примеры недопустимы, так как определение фрагментов на скалярах недопустимо. - -```graphql -fragment fragOnScalar on Int { - # мы не можем определить фрагмент на скаляре (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` не является подтипом `Dog` - somethingElse - } -} -``` - -### Применение директив - -**Директива не может быть использована в данном месте (#KnownDirectivesRule)** - -Можно использовать только директивы GraphQL ("@..."), поддерживаемые The Graph API. - -Вот пример с директивами, поддерживаемыми GraphQL: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Примечание: `@stream`, `@live`, `@defer` не поддерживаются._ - -**Директива может быть использована в этом месте только один раз (#UniqueDirectivesPerLocationRule)** - -Директивы, поддерживаемые The Graph, можно использовать только один раз в каждом месте. 
- -Следующее является недопустимым (и избыточным): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/ru/resources/subgraph-studio-faq.mdx b/website/src/pages/ru/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/ru/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. 
You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries as any other on the network. diff --git a/website/src/pages/ru/subgraphs/_meta-titles.json b/website/src/pages/ru/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ru/subgraphs/_meta-titles.json +++ b/website/src/pages/ru/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ru/subgraphs/_meta.js b/website/src/pages/ru/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ru/subgraphs/_meta.js +++ b/website/src/pages/ru/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ru/subgraphs/best-practices/_meta.js b/website/src/pages/ru/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ru/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/ru/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: 
Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in-memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/ru/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/ru/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ru/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/best-practices/pruning.mdx b/website/src/pages/ru/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0.
+- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain.
+- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired.
+
+We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`:
+
+```yaml
+specVersion: 1.0.0
+schema:
+  file: ./schema.graphql
+indexerHints:
+  prune: auto
+dataSources:
+  - kind: ethereum/contract
+    name: Contract
+    network: mainnet
+```
+
+## Important Considerations
+
+- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data.
+
+- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months).
+
+## Conclusion
+
+Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+
+3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/best-practices/timeseries.mdx b/website/src/pages/ru/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/ru/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+
+### Examples of Aggregation Expressions
+
+- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount")
+- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)")
+- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end")
+
+Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc.
+
+### Query Parameters
+
+- interval: Specifies the time interval (e.g., "hour").
+- where: Filters based on dimensions and timestamp ranges.
+- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch).
+
+### Notes
+
+- Sorting: Results are automatically sorted by timestamp and id in descending order.
+- Current Data: An optional current argument can include the current, partially filled interval.
+
+### Conclusion
+
+Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach:
+
+- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead.
+- Simplifies Development: Eliminates the need for manual aggregation logic in mappings.
+- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness.
+
+By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/)
+
+3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/_meta.js b/website/src/pages/ru/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ru/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ru/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index cfa312c965c4..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Обзор - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Дополнительные источники - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/pruning.mdx b/website/src/pages/ru/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index a1fbbfc6ee87..000000000000 --- a/website/src/pages/ru/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Обзор - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Пример: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Пример: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Пример: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Пример: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ru/subgraphs/developing/deploying/_meta.js b/website/src/pages/ru/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ru/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ru/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 2ca6f958d83e..000000000000 --- a/website/src/pages/ru/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Часто задаваемые вопросы о Subgraph Studio ---- - -## 1. Что такое Subgraph Studio? - -[Subgraph Studio](https://thegraph.com/studio/) — это децентрализованное приложение для создания, управления и публикации субграфов и ключей API. - -## 2. Как создать ключ API? - -Чтобы создать API, перейдите в Subgraph Studio и подключите свой кошелек. Вы сможете щелкнуть вкладку API-ключи вверху. Там Вы сможете создать ключ API. - -## 3. Могу ли я создать несколько ключей API? - -Да! Вы можете создать несколько ключей API для использования в разных проектах. Перейдите по этой [ссылке](https://thegraph.com/studio/apikeys/). - -## 4. Как мне настроить ограничения домена для ключа API? - -После создания ключа API в разделе «Безопасность» Вы можете определить домены, которые могут запрашивать определенный ключ API. - -## 5. Могу ли я передать свой субграф другому владельцу? - -Да, субграфы, которые были опубликованы в Arbitrum One, могут быть перенесены в новый кошелек или на кошелек с мультиподписью. 
Вы можете сделать это, щелкнув три точки рядом с кнопкой «Опубликовать» на странице сведений о субграфе и выбрав «Передать право собственности». - -Обратите внимание, что Вы больше не сможете просматривать или редактировать субграф в Studio после его переноса. - -## 6. Как мне найти URL-адреса запросов для субграфов, если я не являюсь разработчиком субграфа, который хочу использовать? - -Вы можете найти URL-адрес запроса каждого субграфа в разделе «Сведения о Субграфе» в Graph Explorer. После нажатия на кнопку «Запрос» Вы будете перенаправлены на панель, где сможете увидеть URL-адрес запроса интересующего Вас субграфа. Затем Вы можете заменить заполнитель `` ключом API, который хотите использовать в Subgraph Studio. - -Помните, что Вы можете создать ключ API и запрашивать любой субграф, опубликованный в сети, даже если сами создаете субграф. Эти запросы через новый ключ API являются платными, как и любые другие в сети. diff --git a/website/src/pages/ru/subgraphs/developing/publishing/_meta.js b/website/src/pages/ru/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ru/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ru/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ru/subgraphs/querying/_meta.js b/website/src/pages/ru/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ru/subgraphs/querying/_meta.js +++ b/website/src/pages/ru/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff 
--git a/website/src/pages/sv/resources/_meta-titles.json b/website/src/pages/sv/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/sv/resources/_meta-titles.json +++ b/website/src/pages/sv/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/sv/resources/_meta.js b/website/src/pages/sv/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/sv/resources/_meta.js +++ b/website/src/pages/sv/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/sv/resources/release-notes/_meta.js b/website/src/pages/sv/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/sv/resources/release-notes/_meta.js rename to website/src/pages/sv/resources/migration-guides/_meta.js diff --git a/website/src/pages/sv/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/sv/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/sv/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.tokens = total.tokens + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/sv/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/sv/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/sv/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively, you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated; if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most GraphQL operation errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version.
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violate these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!)
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples of GraphQL operations that violate these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must have a selection set specified.
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index 97c6bb95635a..000000000000 --- a/website/src/pages/sv/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: AssemblyScript Migrationsguide ---- - -Hittills har undergrafar använt en av de [första versionerna av AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Äntligen har vi lagt till stöd för den [nyaste tillgängliga versionen](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 - -Det kommer att möjliggöra för undergrafutvecklare att använda nyare funktioner i AS-språket och standardbiblioteket. - -Denna guide är tillämplig för alla som använder `graph-cli`/`graph-ts` version `0.22.0` eller lägre. Om du redan är på en högre version än (eller lika med) det, har du redan använt version `0.19.10` av AssemblyScript 🙂 - -> Observera: Från och med `0.24.0` kan `graph-node` stödja båda versionerna, beroende på `apiVersion` som anges i undergrafens manifest. - -## Funktioner - -### Ny funktionalitet - -- `TypedArray`s kan nu skapas från `ArrayBuffer`s med hjälp av [det nya `wrap`-statiska metoden](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Nya standardbiblioteksfunktioner: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` och `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Lagt till stöd för x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Lagt till `StaticArray`, en mer effektiv varian av en array ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Lagt till `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implementerat `radix`-argumentet på `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Lagt till stöd för avskiljare i flyttal ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Lagt till stöd för funktioner av första klass ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Lägg till inbyggda funktioner: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implementera `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Lagt till stöd för mallliteralsträngar ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Lägg till `encodeURI(Component)` och `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Lägg till `toString`, `toDateString` och `toTimeString` för `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Lägg till `toUTCString` för `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Lägg till inbyggd typ `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Optimeringar - -- `Math`-funktioner som `exp`, `exp2`, `log`, `log2` och `pow` har ersatts med snabbare varianter ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Lätt optimering av `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cachea fler fältåtkomster i std Map och Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimering för potenser av två i `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Annat - -- Typen för en arrayliteral kan nu härledas från dess innehåll ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Uppdaterad stdlib till Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Hur uppgraderar du? - -1. Ändra dina mappningar `apiVersion` i `subgraph.yaml` till `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. 
Uppdatera `graph-cli` som du använder till den `nyaste` versionen genom att köra: - -```bash -# om du har den globalt installerad -npm install --global @graphprotocol/graph-cli@latest - -# eller i din subgraf om du har det som ett utvecklingsberoende -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Gör samma sak för `graph-ts`, men istället för att installera globalt, spara den i dina huvudberoenden: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Följ resten av guiden för att åtgärda språkbrytande ändringar. -5. Kör `codegen` och `deploy` igen. - -## Språkbrytande ändringar - -### Nullbarhet - -I den äldre versionen av AssemblyScript kunde du skapa kod som detta: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Men i den nyare versionen, eftersom värdet är nullable, måste du kontrollera, så här: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -Eller gör så här: - -```typescript -let maybeValue = load()! // bryts i runtime om värdet är null - -maybeValue.aMethod() -``` - -Om du är osäker på vilken du ska välja, rekommenderar vi alltid att använda den säkra versionen. Om värdet inte finns kanske du bara vill göra ett tidigt villkorligt uttalande med en retur i din undergrafshanterare. - -### Variabelskuggning - -Tidigare kunde du använda [variabelskuggning](https://en.wikipedia.org/wiki/Variable_shadowing) och kod som detta skulle fungera: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Men nu är detta inte längre möjligt, och kompilatorn returnerar detta fel: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Du måste döpa om dina duplicerade variabler om du hade variabelskuggning. 
- -### Jämförelser med nollvärden - -När du gör uppgraderingen av din subgraf kan du ibland få fel som dessa: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -För att lösa problemet kan du helt enkelt ändra `if`-satsen till något i den här stilen: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Samma gäller om du använder != istället för ==. - -### Kasting - -Det vanliga sättet att göra kasting tidigare var att bara använda nyckelordet `as`, som så här: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // motsvarande: byteArray -``` - -Detta fungerar dock endast i två scenarier: - -- Primitiv kasting (mellan typer som `u8`, `i32`, `bool`; t.ex. `let b: isize = 10; b as usize`); -- Uppkasting vid klassarv (underklass → överklass) - -Exempel: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -Det finns två scenarier där du kan vilja casta, men att använda `as`/`var` **är inte säkert**: - -- Downcasting vid arv av klasser (superklass → subklass) -- Mellan två typer som delar en superklass - -```typescript -// downcasting om klassarv -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// mellan två typer som delar en superklass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -I dessa fall kan du använda funktionen `changetype`: - -```typescript -// downcasting om klassarv -class Bytes extends 
Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// mellan två typer som delar en superklass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -Om du bara vill ta bort nullability kan du fortsätta använda `as`-operatorn (eller `variable`), men se till att du vet att värdet inte kan vara null, annars kommer det att bryta. - -```typescript -// ta bort ogiltighet -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -För nullbarhetsfallet rekommenderar vi att du tittar på [nullbarhetskontrollfunktionen](https://www.assemblyscript.org/basics.html#nullability-checks), den kommer att göra din kod renare 🙂 - -Vi har också lagt till några fler statiska metoder i vissa typer för att underlätta kastning, de är: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Kontroll av nollställbarhet med tillgång till egendom - -För att använda [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) kan du använda antingen `if`-satser eller den ternära operatorn (`?` och `:`) så här: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Men det fungerar bara när du gör `if` / ternary på en variabel, inte på en egenskapstillgång, som den här: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? 
container.data : 'else' // Kompilerar inte -``` - -Vilket ger detta fel: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -För att åtgärda problemet kan du skapa en variabel för den egenskapen så att kompilatorn kan utföra den magiska nollbarhetskontrollen: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // kompilerar helt okej :) -``` - -### Operatörsöverladdning med egenskapsaccess - -Om du försöker summera (till exempel) en nullable typ (från en property access) med en non nullable, kommer AssemblyScript-kompilatorn istället för att ge en kompileringsfelsvarning om att ett av värdena är nullable, bara att kompilera tyst, vilket gör att koden kan gå sönder vid körning. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // ge kompileringsfel om ogiltighet - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // ger inte kompileringsfel som det borde -``` - -Vi har öppnat en fråga om AssemblyScript-kompilatorn för detta, men om du gör den här typen av operationer i dina subgraf-mappningar bör du ändra dem så att de gör en null-kontroll innan den. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // nu är `n` garanterat ett BigInt -``` - -### Initialisering av värde - -Om du har någon kod som denna: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Det kommer att kompilera men brytas vid körning, det händer eftersom värdet inte har initialiserats, så se till att din subgraf har initialiserat sina värden, så här: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -Även om du har nullable properties i en GraphQL-entitet, som denna: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -Och du har en kod som liknar den här: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Du måste se till att initialisera värdet `total.amount`, för om du försöker komma åt som i den sista raden för summan, kommer det att krascha. Så antingen initialiserar du det först: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Eller så kan du bara ändra ditt GraphQL-schema för att inte använda en nullable-typ för den här egenskapen, då initierar vi den som noll i `codegen` -steget 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // initierar redan icke-nullställbara egenskaper -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Initialisering av klassegenskaper - -Om du exporterar några klasser med egenskaper som är andra klasser (deklarerade av dig eller av standardbiblioteket) på det här sättet: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Kompilatorn kommer att göra fel eftersom du antingen måste lägga till en initialiserare för de egenskaper som är klasser, eller lägga till operatorn `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Initialisering av Array - -Klassen `Array` accepterar fortfarande ett tal för att initiera längden på listan, men du bör vara försiktig eftersom operationer som `.push` faktiskt ökar storleken istället för att lägga till i början, till exempel: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -Beroende på vilka typer du använder, t.ex. 
nullable-typer, och hur du kommer åt dem, kan du stöta på ett runtime-fel som det här: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -För att faktiskt trycka i början bör du antingen initiera `Array` med storlek noll, så här: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Eller så bör du mutera den via index: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### GraphQL-schema - -Detta är inte en direkt AssemblyScript-ändring, men du kan behöva uppdatera din `schema.graphql`-fil. - -Nu kan du inte längre definiera fält i dina typer som är Non-Nullable Lists. Om du har ett schema som detta: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -Du måste lägga till en `!` till medlemmen i List-typen, så här: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -Detta ändrades på grund av skillnader i nullbarhet mellan AssemblyScript-versioner, och det är relaterat till filen `src/generated/schema.ts` (standardväg, du kanske har ändrat detta). 
- -### Annat - -- Jämnade `Map#set` och `Set#add` med specifikationen, som returnerar `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrayer ärver inte längre från ArrayBufferView, men är nu distinkta ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Klasser som initialiseras från objektlitteraler kan inte längre definiera en konstruktor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Resultatet av en binär `**`-operation är nu det gemensamma nämnaren för heltal om båda operanderna är heltal. Tidigare var resultatet ett flyttal som om man anropade `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Tvinga `NaN` till `false` vid kastning till `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- När du skiftar en liten heltalsvärde av typ `i8`/`u8` eller `i16`/`u16`, påverkar endast de 3 respektive 4 minst signifikanta bitarna i RHS-värdet resultatet, analogt med resultatet av en `i32.shl` som endast påverkas av de 5 minst signifikanta bitarna i RHS-värdet. 
Exempel: `someI8 << 8` producerade tidigare värdet `0`, men producerar nu `someI8` på grund av maskeringen av RHS som `8 & 7 = 0` (3 bitar) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Buggfix för relationella strängjämförelser när storlekarna skiljer sig ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/sv/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/sv/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 25d4c50249e1..000000000000 --- a/website/src/pages/sv/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: Migrationsguide för GraphQL-validering ---- - -Snart kommer `graph-node` att stödja 100 % täckning av [GraphQL Valideringsspecifikationen](https://spec.graphql.org/June2018/#sec-Validation). - -Tidigare versioner av `graph-node` stödde inte alla valideringar och gav mer graciösa svar - så, i fall av oklarheter, ignorerade `graph-node` ogiltiga komponenter för GraphQL-operationer. - -Stöd för GraphQL Validering är grundläggande för de kommande nya funktionerna och prestanda vid skala för The Graph Network. - -Det kommer också att säkerställa determinism för frågesvar, en nyckelkrav på The Graph Nätverk. - -**Att aktivera GraphQL Validering kommer att bryta några befintliga frågor** som skickas till The Graph API. - -För att vara i linje med dessa valideringar, följ migrationsguiden. - -> ⚠️ Om du inte migrerar dina frågor innan valideringarna tas i bruk kommer de att returnera fel och eventuellt bryta dina frontends/klienter. - -## Migrationsguide - -Du kan använda CLI-migrationsverktyget för att hitta eventuella problem i dina GraphQL-operationer och åtgärda dem. 
Alternativt kan du uppdatera ändpunkten för din GraphQL-klient att använda ändpunkten `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Att testa dina frågor mot denna ändpunkt kommer att hjälpa dig att hitta problemen i dina frågor. - -> Inte alla subgrafer behöver migreras, om du använder [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) eller [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), ser de redan till att dina frågor är giltiga. - -## Migrations-CLI-verktyg - -**De flesta felen i GraphQL-operationer kan hittas i din kodbas i förväg.** - -Av den anledningen erbjuder vi en smidig upplevelse för validering av dina GraphQL-operationer under utveckling eller i CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) är ett enkelt CLI-verktyg som hjälper till att validera GraphQL-operationer mot ett givet schema. - -### **Komma igång** - -Du kan köra verktyget enligt följande: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Noteringar:** - -- Ange eller ersätt $GITHUB_USER, $SUBGRAPH_NAME med lämpliga värden. Som: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- Förhandsgranskningsschema-URL:en (https://api-next.thegraph.com/) som tillhandahålls har en hög begränsning för antal begäranden och kommer att fasas ut när alla användare har migrerat till den nya versionen. **Använd den inte i produktion.** -- Operationer identifieras i filer med följande filändelser [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` alternativ). 
- -### CLI-utdata - -Verktyget `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` för CLI kommer att ge utdata för eventuella fel i GraphQL-operationer enligt följande: - -![Felutdata från CLI](https://i.imgur.com/x1cBdhq.png) - -För varje fel hittar du en beskrivning, filväg och position, samt en länk till ett exempel på lösning (se följande avsnitt). - -## Kör dina lokala frågor mot förhandsgranskningschemat - -Vi tillhandahåller en ändpunkt `https://api-next.thegraph.com/` som kör en `graph-node`-version med aktiverad validering. - -Du kan prova att skicka frågor till: - -- `https://api-next.thegraph.com/subgraphs/id/` - -eller - -- `https://api-next.thegraph.com/subgraphs/name//` - -För att arbeta med frågor som har markerats med valideringsfel kan du använda din favorit-GraphQL-frågeverktyg, som Altair eller [GraphiQL](https://cloud.hasura.io/public/graphiql), och testa din fråga. Dessa verktyg kommer även att markera dessa fel i sitt användargränssnitt, även innan du kör det. - -## Hur man löser problem - -Här nedan finner du alla fel för validering av GraphQL som kan uppstå i dina befintliga GraphQL-operationer. - -### GraphQL-variabler, operationer, fragment eller argument måste vara unika - -Vi har tillämpat regler för att säkerställa att en operation inkluderar en unik uppsättning GraphQL-variabler, operationer, fragment och argument. - -En GraphQL-operation är endast giltig om den inte innehåller någon oklarhet. - -För att uppnå detta måste vi säkerställa att vissa komponenter i din GraphQL-operation måste vara unika. 
- -Här är ett exempel på några ogiltiga operationer som bryter mot dessa regler: - -**Dubbel frågenamn (#UniqueOperationNamesRule)** - -```graphql -# Följande åtgärd bröt mot UniqueOperationName -# regeln, eftersom vi har en enda åtgärd med 2 frågor -# med samma namn -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # Byt namn på den andra sökningen - namn -} -``` - -**Duplikat Fragmentets namn (#UniqueFragmentNamesRule)** - -```graphql -# Följande åtgärd bröt mot regeln UniqueFragmentName -# regel. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # tilldela ett unikt namn till fragmentet - metadata -} - -fragment MyFieldsName { # tilldela ett unikt namn till fragmentet - namn -} -``` - -**Dubbla variabelnamn (#UniqueVariableNamesRule)** - -```graphql -# Följande operation strider mot UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # behålla den relevanta variabeln (here: `$id: String`) - id - ...MyFields -} -``` - -**Dubbelnamn på argument (#UniqueArgument)** - -```graphql -# Följande åtgärd bröt mot UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Dubbel anonym fråga (#LoneAnonymousOperationRule)** - -Att använda två anonyma operationer bryter också mot regeln `LoneAnonymousOperation` på grund av konflikt i svarsstrukturen: - -```graphql -# Detta kommer att misslyckas om det utförs tillsammans i -# en enda operation med följande två frågor: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Eller namnge de två frågorna: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Överlappande fält - -En GraphQL-urvalsuppsättning anses endast vara giltig om den korrekt löser den slutliga resultatuppsättningen. - -Om en specifik urvalsuppsättning, eller ett fält, skapar tvetydighet antingen genom det valda fältet eller genom de argument som används, kommer GraphQL-tjänsten att misslyckas med att validera operationen. - -Här är några exempel på ogiltiga operationer som bryter mot denna regel: - -**Aliaser för motstridiga fält (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasfält kan orsaka konflikter, antingen med -# andra alias eller andra fält som finns i -# GraphQL-schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Motstridiga fält med argument (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Olika argument kan leda till olika data, -# så vi kan inte anta att fälten kommer att vara desamma. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -I mer komplexa användningsfall kan du också bryta mot denna regel genom att använda två fragment som kan orsaka en konflikt i den slutligen förväntade uppsättningen: - -```graphql -query { - # Till slut har vi två "x"-definitioner, som pekar - # till olika fält! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Dessutom kan GraphQL-direktiv på klientsidan som `@skip` och `@include` leda till tvetydigheter, till exempel: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Du kan läsa mer om algoritmen här] (https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Oanvända variabler eller fragment - -En GraphQL-operation anses också vara giltig endast om alla operationsdefinierade komponenter (variabler, fragment) används. - -Här är några exempel på GraphQL-operationer som bryter mot dessa regler: - -**Oanvänd variabel** (#NoUnusedVariablesRule) - -```graphql -# Ogiltig, eftersom $someVar aldrig används. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Oanvänt fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, eftersom fragmentet AllFields aldrig används. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, eftersom fragmentet AllFields aldrig används. 
-query something { - someData -} - -# ta bort `AllFields` fragmentet -``` - -### Ogiltig eller saknad urvalsuppsättning (#ScalarLeafsRule) - -Dessutom är ett GraphQL-fältval endast giltigt om följande är validerat: - -- Ett objektfält måste ha en valuppsättning angiven. -- Ett edge-fält (scalar, enum) får inte ha en specificerad urvalsuppsättning. - -Här är några exempel på brott mot dessa regler med följande Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Ogiltig urvalsuppsättning** - -```graphql -query { - user { - id { # Invalid, eftersom "id" är av typen ID och inte xhar underfält - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missande urvalsgrupp** - -```graphql -query { - user { - id - image # `image` kräver en urvalssats för underfält! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Felaktiga argumentvärden (#VariablesInAllowedPositionRule) - -GraphQL-operationer som skickar hårdkodade värden till argument måste vara giltiga, baserat på det värde som definieras i schemat. - -Här följer några exempel på ogiltiga operationer som bryter mot dessa regler: - -```graphql -query purposes { - # Om "name" är definierat som "String" i schemat, - # kommer denna fråga att misslyckas under valideringen. - purpose(name: 1) { - id - } -} - -# Detta kan också hända när en felaktig variabel definieras: - -query purposes($name: Int!) { - # Om "name" är definierat som `String` i schemat, - # kommer denna fråga att misslyckas under valideringen, eftersom - # variabeln som används är av typen `Int` - purpose(name: $name) { - id - } -} -``` - -### Okänd typ, variabel, fragment eller direktiv (#UnknownX) - -GraphQL API kommer att ge ett felmeddelande om någon okänd typ, variabel, fragment eller direktiv används. 
- -Dessa okända referenser måste åtgärdas: - -- Byt namn om det var ett stavfel -- annars, ta bort - -### Fragment: ogiltig spridning eller definition - -**Ogiltig spridning av fragment (#PossibleFragmentSpreadsRule)** - -Ett Fragment kan inte spridas på en icke tillämplig typ. - -Exempel: Vi kan inte tillämpa ett `Cat`-fragment på `Dog`-typen: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Ogiltig fragmentdefinition (#FragmentsOnCompositeTypesRule)** - -Alla Fragment måste definieras på (med `on ...`) en sammansatt typ, kort sagt: objekt, gränssnitt eller union. - -Följande exempel är ogiltiga, eftersom det är ogiltigt att definiera fragment på skalärer. - -```graphql -fragment fragOnScalar on Int { -# vi kan inte definiera ett fragment på en skalär (`Int`) - något -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` är inte en subtyp av `Dog` - somethingElse - } -} -``` - -### Användning av direktiv - -**Direktiv kan inte användas på denna plats (#KnownDirectivesRule)** - -Endast GraphQL-direktiv (`@...`) som stöds av The Graph API kan användas. - -Här är ett exempel med de GraphQL-direktiv som stöds: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` stöds inte._ - -**Direktivet kan endast användas en gång på denna plats (#UniqueDirectivesPerLocationRule)** - -De direktiv som stöds av The Graf kan endast användas en gång per plats. - -Följande är ogiltigt (och överflödigt): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/sv/resources/subgraph-studio-faq.mdx b/website/src/pages/sv/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/sv/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? 
+ +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API key, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you built the subgraph yourself. These queries via the new API key are paid queries as any other on the network. 
diff --git a/website/src/pages/sv/subgraphs/_meta-titles.json b/website/src/pages/sv/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/sv/subgraphs/_meta-titles.json +++ b/website/src/pages/sv/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/sv/subgraphs/_meta.js b/website/src/pages/sv/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/sv/subgraphs/_meta.js +++ b/website/src/pages/sv/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/sv/subgraphs/best-practices/_meta.js b/website/src/pages/sv/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/sv/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/sv/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/sv/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/sv/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/sv/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/best-practices/pruning.mdx b/website/src/pages/sv/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/best-practices/timeseries.mdx b/website/src/pages/sv/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/sv/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/_meta.js b/website/src/pages/sv/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/sv/subgraphs/cookbook/_meta.js +++ b/website/src/pages/sv/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index cd68f5b32a38..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Översikt - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Ytterligare resurser - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/pruning.mdx b/website/src/pages/sv/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx b/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 76678087aca2..000000000000 --- a/website/src/pages/sv/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Översikt - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Exempel: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Exempel: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Exempel: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Exempel: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/sv/subgraphs/developing/deploying/_meta.js b/website/src/pages/sv/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/sv/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/sv/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 27af4467124b..000000000000 --- a/website/src/pages/sv/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Vanliga frågor om Subgraf Studio ---- - -## 1. Vad är Subgraf Studio? - -[Subgraf Studio](https://thegraph.com/studio/) är en dapp för att skapa, hantera och publicera undergrafer och API-nycklar. - -## 2. Hur skapar jag en API-nyckel? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. Kan jag skapa flera API-nycklar? - -Ja, du kan skapa flera API-nycklar som du kan använda i olika projekt. Kolla in länken[ här](https://thegraph.com/studio/apikeys/). - -## 4. Hur begränsar jag en domän för en API-nyckel? 
- -När du har skapat en API-nyckel kan du i avsnittet Säkerhet definiera vilka domäner som kan ställa frågor till en specifik API-nyckel. - -## 5. Kan jag överföra min subgraf till en annan ägare? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -Observera att du inte längre kommer att kunna se eller redigera undergrafen i Studio när den har överförts. - -## 6. Hur hittar jag fråge-URL: er för undergrafer om jag inte är utvecklaren av den undergraf jag vill använda? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -Kom ihåg att du kan skapa en API-nyckel och ställa frågor till alla undergrafer som publicerats i nätverket, även om du själv har byggt en undergraf. Dessa förfrågningar via den nya API-nyckeln är betalda förfrågningar som alla andra i nätverket. 
diff --git a/website/src/pages/sv/subgraphs/developing/publishing/_meta.js b/website/src/pages/sv/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/sv/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/sv/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/sv/subgraphs/querying/_meta.js b/website/src/pages/sv/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/sv/subgraphs/querying/_meta.js +++ b/website/src/pages/sv/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/tr/resources/_meta-titles.json b/website/src/pages/tr/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/tr/resources/_meta-titles.json +++ b/website/src/pages/tr/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/tr/resources/_meta.js b/website/src/pages/tr/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/tr/resources/_meta.js +++ b/website/src/pages/tr/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/tr/resources/release-notes/_meta.js 
b/website/src/pages/tr/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/tr/resources/release-notes/_meta.js rename to website/src/pages/tr/resources/migration-guides/_meta.js diff --git a/website/src/pages/tr/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/tr/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/tr/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. + +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. 
+ +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` 
([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. 
+ +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. + if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. 
+ +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: <Uint8Array>byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// <Uint8Array>bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`<T>var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// <Bytes>uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// <ByteArray>bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype<T>` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype<Bytes>(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype<ByteArray>(bytes) // works :) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `<T>variable`), but make sure you know that value can't be null, otherwise it will break. 
+ +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
+ container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized its values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access it like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/tr/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/tr/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/tr/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/tr/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/tr/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 3d1d0f76fb6b..000000000000 --- a/website/src/pages/tr/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validasyon Geçiş Kılavuzu ---- - -Yakında "graph-node", [GraphQL Validasyon Özelliklerinin](https://spec.graphql.org/June2018/#sec-Validation)'in %100'ünü destekleyecektir. 
- -"graph-node"un önceki sürümleri tüm doğrulamaları desteklemiyordu ve daha zarif yanıtlar veriyordu - bu nedenle, belirsizlik durumlarında "graph-node" geçersiz GraphQL işlem bileşenlerini görmezden geliyordu. - -GraphQL validasyon desteği, yaklaşan yeni özelliklerin ve Graph Ağı ölçeğindeki performansın temel direğidir. - -Ayrıca, Graph ağında önemli bir gereklilik olan sorgu yanıtlarının belirleyiciliğini de sağlayacaktır. - -**GraphQL validasyonlarını etkinleştirmek, Graph API'ye gönderilen bazı mevcut sorguları bozacaktır**. - -Bu doğrulamalarla uyumlu olmak için lütfen taşıma kılavuzunu takip edin. - -> ⚠️ Doğrulamalar kullanıma sunulmadan önce sorgularınızı taşımazsanız, bunlar hata döndürecek ve muhtemelen ön uçlarınızı/istemcilerinizi bozacaktır. - -## Taşıma Kılavuzu - -GraphQL işlemlerinizdeki sorunları bulmak ve düzeltmek için CLI taşıma aracını kullanabilirsiniz. Alternatif olarak, `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` uç noktasını kullanmak için GraphQL istemcinizin uç noktasını güncelleyebilirsiniz. Sorgularınızı bu uç noktaya göre test etmek, sorgularınızdaki sorunları bulmanıza yardımcı olacaktır. - -> Tüm subgraph'lerin taşınması gerekmez, [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen) kullanıyorsanız zaten sorgularınızın geçerli olmasını sağlarlar. - -## Geçiş CLI Aracı - -**GraphQL işlem hatalarının çoğu kod tabanınızda önceden bulunabilir.** - -Bu nedenle, geliştirme sırasında veya CI'de GraphQL işlemlerinizi doğrulamak için sorunsuz bir deneyim sağlıyoruz. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate), GraphQL işlemlerini belirli bir şemaya göre doğrulamaya yardımcı olan basit bir CLI aracıdır. 
- -### **Başlarken** - -Aracı aşağıdaki gibi çalıştırabilirsiniz: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notlar:** - -- $GITHUB_USER, $SUBGRAPH_NAME değerini uygun değerlerle ayarlayın veya değiştirin. [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) gibi. -- Sağlanan önizleme şeması URL'si (https://api-next.thegraph.com/) büyük oranda hız sınırlamasına sahiptir ve tüm kullanıcılar yeni sürüme geçtikten sonra kullanımdan kaldırılacaktır. **Çıktıda kullanmayın.** -- İşlemler, aşağıdaki [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option) uzantılarına sahip dosyalarda tanımlanır. - -### CLI Çıktısı - -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI aracı, tüm GraphQL işlemleri hatalarını aşağıdaki gibi verir: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -Her hata için bir açıklama, dosya yolu ve konumu ve bir çözüm örneğine bağlantı bulacaksınız (aşağıdaki bölüme göz atın). - -## Yerel sorgularınızı önizleme şemasına göre çalıştırın - -Doğrulamaların açık olduğu bir "Graph Node" sürümünü çalıştıran bir uç nokta `https://api-next.thegraph.com/` sağlıyoruz. - -Sorguları şu adrese göndererek deneyebilirsiniz: - -- `https://api-next.thegraph.com/subgraphs/id/` - -yada - -- `https://api-next.thegraph.com/subgraphs/name//` - -Doğrulama hataları içerdiği işaretlenen sorgular üzerinde çalışmak için Altair veya [GraphiQL](https://cloud.hasura.io/public/graphiql) gibi en sevdiğiniz GraphQL sorgulama aracını kullanabilir ve sorgunuzu deneyebilirsiniz. Bu araçlar, siz çalıştırmadan önce bile kullanıcı arayüzlerinde bu hataları işaretleyecektir. - -## Sorunları nasıl çözeceğiz? 
- -Aşağıda, mevcut GraphQL işlemlerinizde meydana gelebilecek tüm GraphQL doğrulama hatalarını bulacaksınız. - -### GraphQL değişkenleri, işlemleri, parçaları veya bağımsız değişkenleri benzersiz olmalıdır - -Bir işlemin benzersiz bir GraphQL değişkenleri, işlemler, parçalar ve bağımsız değişkenler kümesi içermesini sağlamak için kurallar uyguladık. - -Bir GraphQL işlemi, yalnızca herhangi bir belirsizlik içermiyorsa geçerlidir. - -Bunu başarmak için, GraphQL işleminizdeki bazı bileşenlerin benzersiz olmasını sağlamamız gerekiyor. - -Aşağıda, bu kuralları ihlal eden birkaç geçersiz işleme örnek verilmiştir: - -**Yinelenen Sorgu Adı (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) 
{ - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Ayrıca, iki anonim işlemin kullanılması, yanıt yapısındaki çakışma nedeniyle `LoneAnonymousOperation` kuralını ihlal edecektir: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Veya iki sorguyu adlandırın: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Çakışan Alanlar - -Bir GraphQL seçim seti, yalnızca nihai sonuç setini doğru bir şekilde çözerse geçerli kabul edilir. - -Belirli bir seçim kümesi veya bir alan, seçilen alan veya kullanılan bağımsız değişkenler nedeniyle belirsizlik yaratırsa, GraphQL hizmeti işlemi doğrulamada başarısız olur. - -Bu kuralı ihlal eden geçersiz işlemlere birkaç örnek: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Ayrıca, daha karmaşık kullanım durumlarında, sonunda beklenen kümede bir çakışmaya neden olabilecek iki parça kullanarak bu kuralı ihlal edebilirsiniz: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -Buna ek olarak, "@skip" ve "@include" gibi müşteri tarafı GraphQL yönergeleri belirsizliğe yol açabilir, örneğin: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[Algoritma hakkında daha fazla bilgiyi buradan edinebilirsiniz.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Kullanılmayan Değişkenler veya Parçalar - -Bir GraphQL işlemi, yalnızca tüm işlem tanımlı bileşenler (değişkenler, parçalar) kullanılıyorsa geçerli kabul edilir. - -İşte bu kuralları ihlal eden GraphQL işlemleri için birkaç örnek: - -**Kullanılmayan Değişken** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. 
-query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Geçersiz veya Eksik Seçim Kümesi (#ScalarLeafsRule) - -Ayrıca, bir GraphQL alan seçimi yalnızca aşağıdakiler doğrulanırsa geçerlidir: - -- Bir nesne alanında olması gereken seçim kümesi belirtildi. -- Bir kenar alanı (scalar, enum) belirtilen bir seçim kümesine sahip olmamalıdır. - -Aşağıda, aşağıdaki şema ile bu kuralların ihlaline ilişkin birkaç örnek verilmiştir: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Yanlış Bağımsız Değişken Değerleri (#VariablesInAllowedPositionRule) - -Sabit kodlu değerleri bağımsız değişkenlere ileten GraphQL işlemleri, şemada tanımlanan değere göre geçerli olmalıdır. - -Aşağıda, bu kuralları ihlal eden geçersiz işlemlere ilişkin birkaç örnek verilmiştir: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# Bu, yanlış bir değişken tanımlandığında da olabilir: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Bilinmeyen Tür, Değişken, Parça veya Yönerge (#UnknownX) - -Herhangi bir bilinmeyen tür, değişken, parça veya yönerge kullanılırsa GraphQL API bir hata verir. 
- -Bu bilinmeyen referanslar düzeltilmelidir: - -- bir yazım hatasıysa yeniden adlandır -- aksi halde kaldır - -### Parça: Geçersiz Yayılma Veya Tanım - -**Geçersiz Parça Yayılması (#PossibleFragmentSpreadsRule)** - -Bir parça, geçerli olmayan bir türe yayılamaz. - -Örnek olarak, "Dog" türüne bir "Cat" parçası uygulayamayız: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Geçersiz Fragment Tanımı (#FragmentsOnCompositeTypesRule)** - -Tüm parçalar ('on ...' kullanılarak) bir bileşik tipte tanımlanmalıdır, kısacası: nesne, arayüz veya birleşim. - -Aşağıdaki örnekler geçersizdir, çünkü skalerler üzerinde parça tanımlama geçersizdir. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Direktif kullanımı - -**Yönerge bu konumda kullanılamaz (#KnownDirectivesRule)** - -Yalnızca Graph API tarafından desteklenen GraphQL yönergeleri ("@...") kullanılabilir. - -İşte GraphQL tarafından desteklenen direktiflere bir örnek: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Not: `@stream`, `@live`, `@defer` desteklenmez._ - -**Yönerge bu konumda yalnızca bir kez kullanılabilir (#UniqueDirectivesPerLocationRule)** - -Graph tarafından desteklenen direktifler, lokasyon başına sadece bir kez kullanılabilir. - -Aşağıdakiler geçersiz (ve gereksiz): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/tr/resources/subgraph-studio-faq.mdx b/website/src/pages/tr/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/tr/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? 
+
+[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys.
+
+## 2. How do I create an API Key?
+
+To create an API Key, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key.
+
+## 3. Can I create multiple API Keys?
+
+Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/).
+
+## 4. How do I restrict a domain for an API Key?
+
+After creating an API Key, in the Security section, you can define the domains that can query a specific API Key.
+
+## 5. Can I transfer my subgraph to another owner?
+
+Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'.
+
+Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred.
+
+## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use?
+
+You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio.
+
+Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries, as any other on the network.
diff --git a/website/src/pages/tr/subgraphs/_meta-titles.json b/website/src/pages/tr/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/tr/subgraphs/_meta-titles.json +++ b/website/src/pages/tr/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/tr/subgraphs/_meta.js b/website/src/pages/tr/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/tr/subgraphs/_meta.js +++ b/website/src/pages/tr/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/tr/subgraphs/best-practices/_meta.js b/website/src/pages/tr/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/tr/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/tr/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write
+
+```yaml
+event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed)
+handler: handleTransferWithPool
+calls:
+  ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to)
+```
+
+The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`.
+
+The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call.
+
+Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0.
+
+## Conclusion
+
+You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs.
+
+## Subgraph Best Practices 1-6
+
+1. [Improve Query Speed with Subgraph Pruning](/subgraphs/best-practices/pruning/)
+
+2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/best-practices/derivedfrom/)
+
+3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/best-practices/immutable-entities-bytes-as-ids/)
+
+4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/best-practices/avoid-eth-calls/)
+
+5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/best-practices/timeseries/)
+
+6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/best-practices/grafting-hotfix/)
diff --git a/website/src/pages/tr/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/tr/subgraphs/best-practices/derivedfrom.mdx
new file mode 100644
index 000000000000..344c906ffe55
--- /dev/null
+++ b/website/src/pages/tr/subgraphs/best-practices/derivedfrom.mdx
@@ -0,0 +1,88 @@
+---
+title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom
+sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom'
+---
+
+## TLDR
+
+Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly.
+
+## How to Use the `@derivedFrom` Directive
+
+You just need to add a `@derivedFrom` directive after your array in your schema. Like this:
+
+```graphql
+comments: [Comment!]! @derivedFrom(field: "post")
+```
+
+`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient.
+
+### Example Use Case for `@derivedFrom`
+
+An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”.
+
+Let’s start with our two entities, `Post` and `Comment`
+
+Without optimization, you could implement it like this with an array:
+
+```graphql
+type Post @entity {
+  id: Bytes!
+  title: String!
+  content: String!
+  comments: [Comment!]!
+}
+
+type Comment @entity {
+  id: Bytes!
+  content: String!
+}
+```
+
+Arrays like these will effectively store extra Comments data on the Post side of the relationship.
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/tr/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/tr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # uint256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/best-practices/pruning.mdx b/website/src/pages/tr/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of blocks to retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of blocks to retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of blocks to retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/best-practices/timeseries.mdx b/website/src/pages/tr/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/tr/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/_meta.js b/website/src/pages/tr/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/tr/subgraphs/cookbook/_meta.js +++ b/website/src/pages/tr/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 1169b1e2b4eb..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Genel Bakış - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Ek Kaynaklar - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/pruning.mdx b/website/src/pages/tr/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx b/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index e7b242bfa2f3..000000000000 --- a/website/src/pages/tr/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Genel Bakış - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Örnek: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Örnek: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Örnek: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Örnek: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/tr/subgraphs/developing/deploying/_meta.js b/website/src/pages/tr/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/tr/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/tr/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 8213fdbbac4a..000000000000 --- a/website/src/pages/tr/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Subgrap Studio Hakkında SSS ---- - -## 1. Subgraph Stüdyo Nedir? - -[Subgraph Studio](https://thegraph.com/studio/), subgraph'ler ve API anahtarları oluşturmak, yönetmek ve yayımlamaya yarayan bir dapp'tir. - -## 2. API Anahtarını Nasıl Oluşturabilirim? - -Bir API oluşturmak için Subgraph Studio'ya gidin ve cüzdanınızı bağlayın. Üst kısımda API anahtarları sekmesine tıklayabilirsiniz. Orada bir API anahtarı oluşturabileceksiniz. - -## 3. Birden Çok API Anahtarı Oluşturabilir miyim? - -Evet! Farklı projelerde kullanmak için birden fazla API anahtarı oluşturabilirsiniz. Daha fazla bilgi için [buraya](https://thegraph.com/studio/apikeys/) göz atın. - -## 4. API Anahtarı için Domain'i Nasıl Kısıtlarım? - -Bir API Anahtarı oluşturduktan sonra, Güvenlik bölümünde belirli bir API Anahtarını sorgulayabilecek alanları tanımlayabilirsiniz. - -## 5. Subgraph'ımı Başka Birine Devredebilir miyim? - -Evet, Arbitrum One'da yayımlanmış subgraph'ler yeni bir cüzdana veya bir Multisig'e aktarılabilir. 
Bunu, subgraph'in ayrıntılar sayfasında 'Yayımla' düğmesinin yanındaki üç noktaya tıklayıp 'Sahipliği devret' seçeneğini seçerek yapabilirsiniz. - -Subgraph'i devrettikten sonra onu Studio'da artık göremeyeceğinizi veya düzenleyemeyeceğinizi unutmayın. - -## 6. Kullanmak İstediğim Subgraph'ın Geliştiricisi Değilsem, bu Subgraphlar için Sorgu URL'lerini Nasıl Bulabilirim? - -Her bir subgraph'in sorgu URL'sini Graph Gezgini'ndeki Subgraph Ayrıntıları bölümünde bulabilirsiniz. “Sorgula” düğmesine tıkladığınızda, ilgilendiğiniz subgraph'in sorgu URL'sini görüntüleyebileceğiniz bir panele yönlendirilirsiniz. Ardından, `` yer tutucusunu Subgraph Studio’da kullanmak istediğiniz API anahtarı ile değiştirebilirsiniz. - -Unutmayın, bir API anahtarı oluşturarak ağda yayımlanmış herhangi bir subgraph'i sorgulayabilirsiniz; bu durum, kendi subgraph'inizi oluşturmuş olsanız bile geçerlidir. Bu yeni API anahtarı üzerinden yapılan sorgular, ağdaki diğer sorgular gibi ücretlidir. diff --git a/website/src/pages/tr/subgraphs/developing/publishing/_meta.js b/website/src/pages/tr/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/tr/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/tr/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/tr/subgraphs/querying/_meta.js b/website/src/pages/tr/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/tr/subgraphs/querying/_meta.js +++ b/website/src/pages/tr/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': 
'', 'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/uk/resources/_meta-titles.json b/website/src/pages/uk/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/uk/resources/_meta-titles.json +++ b/website/src/pages/uk/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/uk/resources/_meta.js b/website/src/pages/uk/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/uk/resources/_meta.js +++ b/website/src/pages/uk/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/uk/resources/release-notes/_meta.js b/website/src/pages/uk/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/uk/resources/release-notes/_meta.js rename to website/src/pages/uk/resources/migration-guides/_meta.js diff --git a/website/src/pages/uk/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/uk/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/uk/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.tokens = total.tokens + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/uk/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/uk/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/uk/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+
+To be compliant with those validations, please follow the migration guide.
+
+> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients.
+
+## Migration guide
+
+You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries.
+
+> Not all subgraphs will need to be migrated; if you are using [GraphQL ESLint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid.
+
+## Migration CLI tool
+
+**Most of the GraphQL operations errors can be found in your codebase ahead of time.**
+
+For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI.
+
+[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema.
+
+### **Getting started**
+
+You can run the tool as follows:
+
+```bash
+npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql
+```
+
+**Notes:**
+
+- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks)
+- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/uk/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/uk/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 4d909e8970a8..000000000000 --- a/website/src/pages/uk/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). 
- -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## Migration guide - -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. - -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. - -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. 
- -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). - -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. - -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -or - -- `https://api-next.thegraph.com/subgraphs/name//` - -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. - -## How to solve issues - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
- -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. - -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. 
-- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### Fragment: invalid spread or definition - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. 
- -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. - -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/uk/resources/subgraph-studio-faq.mdx b/website/src/pages/uk/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/uk/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. 
There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries as any other on the network.
diff --git a/website/src/pages/uk/subgraphs/_meta-titles.json b/website/src/pages/uk/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/uk/subgraphs/_meta-titles.json +++ b/website/src/pages/uk/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/uk/subgraphs/_meta.js b/website/src/pages/uk/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/uk/subgraphs/_meta.js +++ b/website/src/pages/uk/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/uk/subgraphs/best-practices/_meta.js b/website/src/pages/uk/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/uk/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/uk/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional; however, it is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events.
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/uk/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/uk/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/uk/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/best-practices/pruning.mdx b/website/src/pages/uk/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/best-practices/timeseries.mdx b/website/src/pages/uk/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/uk/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/_meta.js b/website/src/pages/uk/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/uk/subgraphs/cookbook/_meta.js +++ b/website/src/pages/uk/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index a0bd3f4ab1c2..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Overview - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Additional Resources - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/pruning.mdx b/website/src/pages/uk/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx b/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 0168be53d7ed..000000000000 --- a/website/src/pages/uk/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Overview - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/uk/subgraphs/developing/deploying/_meta.js b/website/src/pages/uk/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/uk/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/uk/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/uk/subgraphs/developing/publishing/_meta.js b/website/src/pages/uk/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/uk/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/uk/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/uk/subgraphs/querying/_meta.js b/website/src/pages/uk/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/uk/subgraphs/querying/_meta.js +++ b/website/src/pages/uk/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 
'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/ur/resources/_meta-titles.json b/website/src/pages/ur/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/ur/resources/_meta-titles.json +++ b/website/src/pages/ur/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/ur/resources/_meta.js b/website/src/pages/ur/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/ur/resources/_meta.js +++ b/website/src/pages/ur/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/ur/resources/release-notes/_meta.js b/website/src/pages/ur/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/ur/resources/release-notes/_meta.js rename to website/src/pages/ur/resources/migration-guides/_meta.js diff --git a/website/src/pages/ur/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/ur/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/ur/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a version higher than (or equal to) that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'.
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non-nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving a chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it.
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.tokens = total.tokens + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ur/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/ur/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/ur/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `.jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operation errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/$DEPLOYMENT_ID` + +or + +- `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validation errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique.
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + avatar # `avatar` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + avatar { + url + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type.
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragments must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with the GraphQL directives supported by The Graph: + +```graphql +query { + dog { + name @include(if: true) + age @skip(if: true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location.
+ +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index 31439d43c505..000000000000 --- a/website/src/pages/ur/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: اسمبلی سکرپٹ مائیگریشن گائیڈ ---- - -اب تک، سب گراف [اسمبلی اسکرپٹ کے پہلے ورژن](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) میں سے ایک استعمال کر رہے ہیں۔ آخر کار ہم نے [جدید ترین دستیاب](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) کے لیے تعاون شامل کر دیا ہے! 🎉 - -وہ سب گراف ڈویلپرز کو اسمبلی لینگوج اور سٹینڈرڈ لائبریری کی نئ خصوصیات استعمال کرنے پر فعال کرے گا. - -یہ ہدایت نامہ `graph-cli`/`graph-ts` ورژن `0.22.0` کے نیچے استعمال کرنے والے ہر فرد پر لاگو ہوتا ہے۔ اگر آپ پہلے ہی اس سے زیادہ (یا برابر) ورژن پر ہیں، تو آپ اسمبلی اسکرپٹ کا ورژن `0.19.10` پہلے ہی استعمال کر رہے ہیں 🙂 - -> نوٹ: `0.24.0` کے مطابق، `graph-node` سب گراف مینی فیسٹ میں متعین `apiVersion` کی بنیاد پر، دونوں ورژنز کو سپورٹ کر سکتا ہے. 
- -## خصوصیات - -### نئ خصوصیت - -- `TypedArray`s اب `ArrayBuffer`s سے [نیا ` wrap` جامد طریقہ](https://www.assemblyscript.org/stdlib/typedarray.html#static-members کا استعمال کر کے بنایا جا سکتا ہے) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- لائبریری کے نئے معیاری فنکشنز: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` اور `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) کے x مثال کے لئے تعاون شامل کیا گیا -- شامل کیا گیا `StaticArray`، ایک زیادہ موثر ایرے ویرینٹ ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- شامل کیا گیا `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- `Number#toString` پر `radix` دلیل کو نافذ کیا گیا ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- فلوٹنگ پوائنٹ لٹریلز ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) میں الگ کرنے والوں کے لیے تعاون شامل کیا گیا -- فرسٹ کلاس فنکشنز ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) کے لیے شامل کردہ تعاون -- بلٹ انز شامل کریں: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- `Array/TypedArray/String#at` لاگو کریں ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- تمثیل کے لغوی سٹرنگز کے لیے شامل کیا گیا تعاون ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- `encodeURI(Component)` اور `decodeURI(ج Component)` شامل کریں ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- `toString`، `toDateString` اور `toTimeString` کو `Date` میں شامل کریں ([v0.18.29](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- `Date` 
کے لیے `toUTCSstring` شامل کریں ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- شامل کریں `nonnull/NonNullable` بلٹ ان قسم ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### اصلاح - -- `Math` فنکشنز جیسے `exp`، `exp2`، `log`، `log2` اور ` pow` کو تیز تر متغیرات سے بدل دیا گیا ہے ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- `Math.mod` کو تھوڑا سا بہتر بنائیں ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Std میپ اور سیٹ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) میں مزید فیلڈ رسائی کیش کریں -- `ipow32/64` میں دو کی طاقتوں کو بہتر بنائیں ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### دوسرا - -- ایک ایرے کے لغوی کی قسم کا اندازہ اب اس کے مواد سے لگایا جا سکتا ہے ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Stdlib کو یونیکوڈ 13.0.0 میں اپ ڈیٹ کیا گیا ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## اپ گریڈ کیسے کریں؟ - -1. اپنی میپنگز `apiVersion` کو `subgraph.yaml` میں `0.0.6` میں تبدیل کریں: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. `graph-cli` کو اپ ڈیٹ کریں جسے آپ `تازہ ترین` ورژن میں استعمال کر رہے ہیں یہ چلا کر: - -```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest - -# or in your subgraph if you have it as a dev dependency -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. `graph-ts` کے لیۓ بھی ایسا ہی کریں، لیکن عالمی سطح پر انسٹال کرنے کے بجائے، اسے اپنے اہم انحصار میں محفوظ کریں: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. زبان کو توڑنے والی تبدیلیوں کو ٹھیک کرنے کے لیے بقیہ گائیڈ پر عمل کریں. -5. `codegen` اور `deploy` کو دوباری چلائیں. 
- -## بریکنگ تبدیلیاں - -### کالعدم ہونا - -اسمبلی اسکرپٹ کے پرانے ورژن پر، آپ اس طرح کا کوڈ بنا سکتے ہیں: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -تاہم نئے ورژن پر، کیونکہ ویلیو کالعدم ہے، اس کے لیے آپ کو اس طرح چیک کرنے کی ضرورت ہے: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -یا اسے اس طرح فورس کریں: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -اگر آپ کو یقین نہیں ہے کہ کون سا انتخاب کرنا ہے، تو ہم ہمیشہ محفوظ ورژن استعمال کرنے کی تجویز کرتے ہیں۔ اگر ویلیو موجود نہیں ہے تو آپ اپنے سب گراف ہینڈلر میں واپسی کے ساتھ صرف ابتدائی اف سٹیٹمینٹ کرنا چاہتے ہیں. - -### متغیر شیڈونگ - -اس سے پہلے کہ آپ [متغیر شیڈونگ](https://en.wikipedia.org/wiki/Variable_shadowing) کر سکیں اور اس طرح کا کوڈ کام کرے گا: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -تاہم اب یہ ممکن نہیں ہے، اور کمپائلر یہ غلطی واپس کرتا ہے: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -اگر آپ کے پاس متغیر شیڈونگ ہے تو آپ کو اپنے ڈپلیکیٹ متغیرات کا نام تبدیل کرنے کی ضرورت ہوگی. - -### کالعدم موازنہ - -اپنے سب گراف پر اپ گریڈ کرنے سے، بعض اوقات آپ کو اس طرح کی غلطیاں مل سکتی ہیں: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -حل کرنے کے لیے آپ صرف `if` اسٹیٹمنٹ کو اس طرح تبدیل کر سکتے ہیں: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -اگر آپ == کی بجائے != کر رہے ہیں تو بھی یہی لاگو ہوتا ہے. 
- -### کاسٹنگ - -اس سے پہلے کاسٹ کرنے کا عام طریقہ صرف `as` کلیدی لفظ استعمال کرنا تھا، اس طرح: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -تاہم یہ صرف دو صورتوں میں کام کرتا ہے: - -- قدیم کاسٹنگ (قسم کے درمیان جیسے کہ `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- کلاس وراثت پر اپکاسٹنگ (سپر کلاس → سب کلاس) - -مثالیں: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -دو ایسے حالات ہیں جہاں آپ کاسٹ کرنا چاہیں گے، لیکن `as`/`var` **محفوظ نہیں ہے**: - -- طبقاتی وراثت میں کمی ( سب کلاس → سپر کلاس) -- دو قسموں کے درمیان جو ایک سپر کلاس کا اشتراک کرتے ہیں - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -ان معاملات کے لیے، آپ `changetype` فنکشن استعمال کر سکتے ہیں: - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -اگر آپ صرف منسوخی کو ہٹانا چاہتے ہیں، تو آپ `as` آپریٹر (یا `variable`) کا استعمال جاری رکھ سکتے ہیں، لیکن یقینی بنائیں کہ آپ جانتے ہیں کہ قدر کالعدم نہیں ہوسکتی دوسری صورت میں یہ ٹوٹ جائے گا. 
- -```typescript -// remove nullability -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -منسوخی کے معاملے کے لیے ہم تجویز کرتے ہیں کہ [منسوخی چیک فیچر](https://www.assemblyscript.org/basics.html#nullability-checks) پر ایک نظر ڈالیں، یہ آپ کے کوڈ کو صاف کر دے گا 🙂 - -نیز ہم نے کاسٹنگ کو آسان بنانے کے لیے کچھ اقسام میں کچھ مزید جامد طریقے شامل کیے ہیں، وہ یہ ہیں: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### پراپرٹی ایکسیس کے ساتھ منسوخی کی جانچ - -[منسوخی چیک فیچر](https://www.assemblyscript.org/basics.html#nullability-checks) استعمال کرنے کے لیے آپ یا تو `if` اسٹیٹمنٹس یا ٹرنری استعمال کرسکتے ہیں۔ آپریٹر (`?` اور `:`) اس طرح: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -تاہم یہ صرف اس وقت کام کرتا ہے جب آپ متغیر پر `if` / ٹرنری کر رہے ہوں، نہ کہ کسی پراپرٹی تک رسائی پر، اس طرح: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -جو اس غلطی کو ظاہر کرتا ہے: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -اس مسئلے کو حل کرنے کے لیے، آپ اس پراپرٹی تک رسائی کے لیے ایک متغیر بنا سکتے ہیں تاکہ مرتب کرنے والا منسوخی چیک میجک کر سکے: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### پراپرٹی ایکسیس کے ساتھ آپریٹر اوور لوڈنگ - -اگر آپ (مثال کے طور پر) ایک غیر منسوخ قسم (پراپرٹی ایکسیس سے) کو ایک غیر کالعدم کے ساتھ جمع کرنے کی کوشش کرتے ہیں، تو اسمبلی اسکرپٹ کمپائلر کمپائل ٹائم ایرر انتباہ دینے کے بجائے کہ ایک قدر کالعدم ہے، یہ صرف خاموشی سے مرتب کرتا ہے، موقع دیتے ہوئے رن ٹائم پر کوڈ کے ٹوٹنے کے لیے. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -ہم نے اس کے لیے اسمبلی اسکرپٹ کمپائلر پر ایک ایشو کھولا ہے، لیکن ابھی کے لیے اگر آپ اپنی سب گراف میپنگز میں اس قسم کی کارروائیاں کرتے ہیں، تو آپ کو اس سے پہلے ایک کالعدم چیک کرنے کے لیے انہیں تبدیل کرنا چاہیے. 
- -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### ویلیو کی ابتدا - -اگر آپ کے پاس اس طرح کا کوڈ ہے: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -یہ کمپائل کرے گا لیکن رن ٹائم پر ٹوٹ جائے گا، ایسا اس لیے ہوتا ہے کیونکہ ویلیو شروع نہیں کی گئی ہے، اس لیے یقینی بنائیں کہ آپ کے سب گراف نے اپنی ویلیوس کی ابتدا کی ہے، اس طرح: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -نیز اگر آپ کے پاس گراف کیو ایل ہستی میں کالعدم خصوصیات ہیں، اس طرح: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -اور آپ کے پاس اس سے ملتا جلتا کوڈ ہے: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -آپ کو `total.amount` ویلیو کو شروع کرنا یقینی بنانا ہوگا، کیونکہ اگر آپ رقم کے لیے آخری لائن کی طرح رسائی حاصل کرنے کی کوشش کرتے ہیں، تو یہ کریش ہوجائے گا۔ تو آپ یا تو اسے پہلے شروع کریں: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -یا آپ اپنے GraphQL سکیما کو تبدیل کر سکتے ہیں تاکہ اس پراپرٹی کے لیے ایک غیر قابل استعمال قسم کا استعمال نہ کیا جائے، پھر ہم اسے `codegen` ویلیو پر صفر کے طور پر شروع کریں گے 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! 
-} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### کلاس پراپرٹی کی شروعات - -اگر آپ ایسی خصوصیات کے ساتھ کسی بھی کلاس کو برآمد کرتے ہیں جو دوسری کلاسیں ہیں (آپ کے ذریعہ یا معیاری لائبریری کے ذریعہ اعلان کردہ) اس طرح: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -کمپائلر میں غلطی ہو جائے گی کیونکہ آپ کو یا تو ان خصوصیات کے لیے ایک ابتدائی شامل کرنے کی ضرورت ہے جو کلاسز ہیں، یا `!` آپریٹر شامل کریں: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### ایرے شروع کرنا - -`Array` کلاس ابھی بھی فہرست کی لمبائی کو شروع کرنے کے لیے ایک نمبر کو قبول کرتی ہے، تاہم آپ کو خیال رکھنا چاہیے کیونکہ `.push` جیسے آپریشن شروع میں شامل کرنے کے بجائے سائز میں اضافہ کریں گے، مثال کے طور پر: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -ان اقسام پر منحصر ہے جو آپ استعمال کر رہے ہیں، مثال کے طور پر کالعدم، اور آپ ان تک کیسے رسائی حاصل کر رہے ہیں، آپ کو رن ٹائم غلطی کا سامنا کرنا پڑ سکتا ہے اس طرح: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -اصل میں شروع میں دھکیلنے کے لیے آپ کو یا تو، `Array` کو سائز صفر کے ساتھ شروع کرنا چاہیے، اس طرح: - -```typescript -let arr = new Array(0) // [] - 
-arr.push('something') // ["something"] -``` - -یا آپ کو اسے انڈیکس کے ذریعے تبدیل کرنا چاہئے: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### گراف کیو ایل اسکیما - -یہ اسمبلی اسکرپٹ میں براہ راست تبدیلی نہیں ہے، لیکن آپ کو اپنی `schema.graphql` فائل کو اپ ڈیٹ کرنا پڑ سکتا ہے. - -اب آپ اپنی اقسام میں ان فیلڈز کی وضاحت نہیں کر سکتے جو کہ غیر منسوخ فہرست ہیں۔ اگر آپ کے پاس اس طرح کا سکیما ہے: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -آپ کو فہرست کی قسم کے میمبر میں ایک `!` شامل کرنا پڑے گا، اس طرح: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -یہ اسمبلی اسکرپٹ ورژنز کے درمیان منسوخی کے فرق کی وجہ سے تبدیل ہوا، اور اس کا تعلق `src/generated/schema.ts` فائل سے ہے (پہلے سے طے شدہ راستہ، ہو سکتا ہے آپ نے اسے تبدیل کیا ہو). 
- -### دوسرا - -- قیاس کے ساتھ منسلک `Map#set` اور `Set#add`، واپس کر رہے ہیں `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- ایریس اب ArrayBufferView سے وراثت میں نہیں ملتی ہیں، لیکن اب الگ ہیں ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- آبجیکٹ لٹریلز سے شروع کی گئی کلاسز اب کنسٹرکٹر کی وضاحت نہیں کر سکتی ہیں ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- ایک `**` بائنری آپریشن کا نتیجہ اب مشترکہ ڈینومینیٹر انٹیجر ہے اگر دونوں آپرینڈ انٹیجرز ہیں۔ اس سے پہلے، نتیجہ ایک فلوٹ تھا جیسے کہ کال کر رہا ہو `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- زبردستی `NaN` کو `false` جب `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) پر کاسٹ کریں -- `i8`/`u8` یا `i16`/`u16` قسم کی چھوٹی انٹیجر ویلیو کو منتقل کرتے وقت، بالترتیب صرف 3 کم از کم 4 RHS ویلیو کے اہم بٹس نتیجہ کو متاثر کرتے ہیں، جو کہ `i32.shl` کے نتیجہ کے مطابق صرف RHS ویلیو کے 5 کم سے کم اہم بٹس سے متاثر ہوتے ہیں۔ مثال: `someI8 << 8` نے پہلے `0` کی ویلیو تیار کی تھی، لیکن اب RHS کو `8 & 7 = 0` (3 بٹس) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- جب سائز مختلف ہوں تو متعلقہ سٹرنگ کے موازنہ کی بگ فکس ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 569f6a35898b..000000000000 --- a/website/src/pages/ur/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,537 +0,0 @@ ---- -title: GraphQL کی توثیق کی منتقلی گائیڈ ---- - -جلد ہی `گراف نوڈ` [GraphQL توثیق کی تفصیلات](https://spec.graphql.org/June2018/#sec-Validation) کی 100% کوریج کو سپورٹ کرے گا. 
- -`گراف نوڈ` کے پچھلے ورژن تمام توثیقوں کی حمایت نہیں کرتے تھے اور زیادہ خوبصورت جوابات فراہم کرتے تھے - لہذا، ابہام کی صورت میں، `گراف نوڈ` غلط گراف کیو ایل آپریشن کے اجزاء کو نظر انداز کر رہا تھا. - -GraphQL ویلیڈیشن سپورٹ آنے والی نئی خصوصیات اور گراف نیٹ ورک کے پیمانے پر کارکردگی کا ایک ستون ہے. - -یہ کیوری کے جوابات کے تعین کو بھی یقینی بنائے گا، جو گراف نیٹ ورک پر ایک اہم ضرورت ہے. - -**GraphQL کی توثیق کو فعال کرنے سے کچھ موجودہ کیوریز ختم ہو جائیں گے** جو گراف API کو بھیجے گئے ہیں. - -ان توثیقوں کی تعمیل کرنے کے لیے، براہ کرم مائگریشن گائیڈ پر عمل کریں. - -> ⚠️ اگر آپ اپنے کیوریز کو توثیق کرنے سے پہلے منتقل نہیں کرتے ہیں، تو وہ غلطیاں واپس کریں گے اور ممکنہ طور پر آپ کے فرنٹ اینڈ/کلائنٹس کو توڑ دیں گے. - -## مائیگریشن گائیڈ - -آپ اپنے GraphQL آپریشنز میں کسی بھی مسئلے کو تلاش کرنے اور انہیں ٹھیک کرنے کے لیے CLI مائیگریشن ٹول استعمال کر سکتے ہیں۔ متبادل طور پر آپ `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` اینڈ پوائنٹ استعمال کرنے کے لیے اپنے GraphQL کلائنٹ کے اینڈ پوائنٹ کو اپ ڈیٹ کر سکتے ہیں۔ اس اختتامی نقطہ کے خلاف اپنے کیوریز کی جانچ کرنے سے آپ کو اپنے کیوریز میں مسائل تلاش کرنے میں مدد ملے گی. - -> اگر آپ [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) یا [ GraphQL کوڈ جنریٹر] (/https://the-guild.dev) استعمال کر رہے ہیں تو تمام سب گراف کو منتقل کرنے کی ضرورت نہیں ہوگی۔ /graphql/codegen، وہ پہلے ہی اس بات کو یقینی بناتے ہیں کہ آپ کے کیوریز درست ہیں. - -## مائیگریشن CLI ٹول - -**GraphQL آپریشنز کی زیادہ تر خرابیاں آپ کے کوڈ بیس میں وقت سے پہلے پائی جا سکتی ہیں۔** - -اس وجہ سے، ہم ترقی کے دوران یا CI میں آپ کے GraphQL آپریشنز کی توثیق کرنے کے لیے ایک ہموار تجربہ فراہم کرتے ہیں. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) ایک سادہ سی ایل آئی ٹول ہے جو دیئے گئے اسکیما کے خلاف GraphQL آپریشنز کی توثیق کرنے میں مدد کرتا ہے. 
- -### **شروع ہوا چاہتا ہے** - -آپ اس آلے کو اس طرح چلا سکتے ہیں: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**نوٹس:** - -- $GITHUB_USER، $SUBGRAPH_NAME کو مناسب اقدار کے ساتھ سیٹ یا تبدیل کریں۔ جیسے: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- پیش نظارہ اسکیما URL (https://api-next.thegraph.com/) فراہم کردہ بہت زیادہ شرح سے محدود ہے اور تمام صارفین کے نئے ورژن پر منتقل ہونے کے بعد غروب ہو جائے گا۔ **اسے پیداوار میں استعمال نہ کریں۔** -- مندرجہ ذیل ایکسٹینشنز [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx کے ساتھ فائلوں میں آپریشنز کی شناخت کی جاتی ہے۔ `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` آپشن). - -### CLI آؤٹ پٹ - -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI ٹول کسی بھی گراف کیو ایل آپریشن کی خرابیوں کو اس طرح آؤٹ پٹ کرے گا: - -![CLI سے خرابی کی پیداوار](https://i.imgur.com/x1cBdhq.png) - -ہر غلطی کے لیے، آپ کو تفصیل، فائل کا راستہ اور پوزیشن، اور حل کی مثال کا لنک ملے گا (مندرجہ ذیل سیکشن دیکھیں). - -## اپنے مقامی کیوریز کو پیش نظارہ اسکیما کے خلاف چلائیں - -ہم ایک اینڈ پوائنٹ فراہم کرتے ہیں `https://api-next.thegraph.com/` جو ایک `گراف نوڈ` ورژن چلاتا ہے جس کی توثیق آن ہے. - -آپ کیوریز کو ان پر بھیج کر آزما سکتے ہیں: - -- `https://api-next.thegraph.com/subgraphs/id/` - -یا - -- `https://api-next.thegraph.com/subgraphs/name//` - -ان کیوریز پر کام کرنے کے لیے جن کو توثیق کی خرابیوں کے طور پر نشان زد کیا گیا ہے، آپ اپنا پسندیدہ GraphQL کیوریز کا ٹول استعمال کر سکتے ہیں، جیسے الٹیر یا [GraphiQL](https://cloud.hasura.io/public/graphiql)، اور اپنے کیوری کو آزمائیں۔ وہ ٹولز ان غلطیوں کو اپنے UI میں بھی نشان زد کریں گے، یہاں تک کہ آپ اسے چلانے سے پہلے. - -## مسائل کو حل کرنے کا طریقہ - -ذیل میں، آپ کو GraphQL کی توثیق کی وہ تمام خرابیاں ملیں گی جو آپ کے موجودہ GraphQL آپریشنز میں ہو سکتی ہیں. 
- -### GraphQL متغیرات، آپریشنز، ٹکڑے، یا دلائل منفرد ہونے چاہئیں - -ہم نے اس بات کو یقینی بنانے کے لیے قواعد لاگو کیے ہیں کہ آپریشن میں GraphQL متغیرات، آپریشنز، ٹکڑوں اور دلائل کا ایک منفرد سیٹ شامل ہے. - -GraphQL آپریشن صرف اس صورت میں درست ہے جب اس میں کوئی ابہام نہ ہو. - -اس کو حاصل کرنے کے لیے، ہمیں یہ یقینی بنانا ہوگا کہ آپ کے GraphQL آپریشن میں کچھ اجزاء منفرد ہونے چاہئیں. - -یہاں چند غلط کارروائیوں کی ایک مثال ہے جو ان اصولوں کی خلاف ورزی کرتی ہیں: - -**نقل کیوری کا نام (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_حل:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**ڈپلیکیٹ ٹکڑے کا نام (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_حل:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**ڈپلیکیٹ متغیر نام (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_حل:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**ڈپلیکیٹ دلیل کا نام (#Unique Argument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_حل:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**ڈپلیکیٹ گمنام کیوری (#LoneAnonymousOperationRule)** - -نیز، جوابی ڈھانچے میں تضاد کی وجہ سے دو گمنام آپریشنز کا استعمال 'LoneAnonymousOperation' اصول کی خلاف ورزی کرے گا: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_حل:_ - -```graphql -query { - someField - otherField -} -``` - -یا دو کیوریز کا نام دیں: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### اوور لیپنگ فیلڈز - -GraphQL سلیکشن سیٹ صرف اس صورت میں درست سمجھا جاتا ہے جب یہ حتمی نتیجہ سیٹ کو درست طریقے سے حل کرتا ہے. - -اگر کوئی مخصوص سلیکشن سیٹ، یا فیلڈ، منتخب فیلڈ یا استعمال شدہ دلائل کے ذریعے ابہام پیدا کرتا ہے، تو GraphQL سروس آپریشن کی توثیق کرنے میں ناکام ہو جائے گی. - -یہاں غلط کاروائیوں کی چند مثالیں ہیں جو اس اصول کی خلاف ورزی کرتی ہیں: - -**متضاد فیلڈز عرفی نام (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_حل:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -``` - -**دلائل کے ساتھ متضاد فیلڈز (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. 
-query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_حل:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -اس کے علاوہ، زیادہ پیچیدہ استعمال کے معاملات میں، آپ دو ٹکڑے استعمال کر کے اس اصول کی خلاف ورزی کر سکتے ہیں جو بالآخر متوقع سیٹ میں تنازعہ کا سبب بن سکتے ہیں: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -اس کے علاوہ، کلائنٹ سائڈ گراف کیو ایل ڈائریکٹیو جیسے `@skip` اور `@include` ابہام کا باعث بن سکتے ہیں، مثال کے طور پر: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[آپ الگورتھم کے بارے میں یہاں مزید پڑھ سکتے ہیں۔](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### غیر استعمال شدہ متغیرات یا ٹکڑے - -GraphQL آپریشن کو بھی صرف اسی صورت میں درست سمجھا جاتا ہے جب آپریشن سے متعین تمام اجزاء (متغیرات، ٹکڑے) استعمال کیے جائیں. - -یہاں GraphQL آپریشنز کے لیے چند مثالیں ہیں جو ان اصولوں کی خلاف ورزی کرتی ہیں: - -**غیر استعمال شدہ متغیر** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. -query something($someVar: String) { - someData -} -``` - -_حل:_ - -```graphql -query something { - someData -} -``` - -**غیر استعمال شدہ ٹکڑا** (#NoUnused FragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_حل:_ - -```graphql -# Invalid, because fragment AllFields is never used. 
-query something { - someData -} - -# remove the `AllFields` fragment -``` - -### غلط یا غائب سلیکشن سیٹ (#ScalarLeafsRule) - -نیز، GraphQL فیلڈ کا انتخاب صرف اس صورت میں درست ہے جب درج ذیل کی توثیق کی گئی ہو: - -- ایک آبجیکٹ فیلڈ میں انتخاب کا سیٹ ہونا ضروری ہے. -- ایک کنارے والے فیلڈ (اسکیلر، اینم) میں انتخابی سیٹ متعین نہیں ہونا چاہیے. - -درج ذیل اسکیما کے ساتھ ان قواعد کی خلاف ورزیوں کی چند مثالیں یہ ہیں: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**غلط سلیکشن سیٹ** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_حل:_ - -```graphql -query { - user { - id - } -} -``` - -**موجود سلیکشن سیٹ** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_حل:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### غلط دلائل کی قدریں (#VariablesInAllowedPositionRule) - -GraphQL آپریشنز جو ہارڈ کوڈ شدہ اقدار کو آرگیومینٹس میں منتقل کرتے ہیں، اسکیما میں بیان کردہ قدر کی بنیاد پر درست ہونے چاہئیں. - -یہاں غلط کارروائیوں کی چند مثالیں ہیں جو ان اصولوں کی خلاف ورزی کرتی ہیں: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### نامعلوم قسم، متغیر، ٹکڑا، یا ہدایت (#UnknownX) - -اگر کوئی نامعلوم قسم، متغیر، ٹکڑا، یا ہدایت کا استعمال کیا جاتا ہے تو GraphQL API ایک خرابی پیدا کرے گا. 
- -ان نامعلوم حوالہ جات کو درست کرنا ضروری ہے: - -- اگر یہ ٹائپنگ کی غلطی تھی تو نام تبدیل کریں -- دوسری صورت میں، ہٹا دیں - -### ٹکڑا: غلط پھیلاؤ یا تعریف - -**غلط فریگمنٹ اسپریڈ (#PossibleFragmentSpreadsRule)** - -ایک ٹکڑا ایک غیر قابل اطلاق قسم پر پھیلایا نہیں جا سکتا. - -مثال کے طور پر، ہم 'کتے' کی قسم پر 'بلی' کے ٹکڑے کا اطلاق نہیں کر سکتے ہیں: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**فریگمنٹ کی غلط تعریف (#FragmentsOnCompositeTypesRule)** - -تمام ٹکڑوں کو ایک جامع قسم پر (`آن...` کا استعمال کرتے ہوئے) بیان کیا جانا چاہیے، مختصر میں: آبجیکٹ، انٹرفیس، یا یونین. - -درج ذیل مثالیں غلط ہیں، کیونکہ اسکیلرز پر ٹکڑوں کی وضاحت کرنا غلط ہے. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### ہدایات کا استعمال - -**ہدایت کو اس مقام پر استعمال نہیں کیا جا سکتا (#KnownDirectivesRule)** - -صرف GraphQL ڈائریکٹیو (`@...`) کو استعمال کیا جا سکتا ہے جو گراف API کے ذریعے تعاون یافتہ ہیں. - -GraphQL کی معاونت والی ہدایات کے ساتھ ایک مثال یہ ہے: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_نوٹ: `@stream`، `@live`، `@defer` تعاون یافتہ نہیں ہیں۔_ - -**ہدایت کو اس مقام پر صرف ایک بار استعمال کیا جا سکتا ہے (#UniqueDirectivesPerLocationRule)** - -گراف کی طرف سے تعاون یافتہ ہدایات فی مقام صرف ایک بار استعمال کی جا سکتی ہیں. - -درج ذیل غلط ہے (اور بے کار): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/ur/resources/subgraph-studio-faq.mdx b/website/src/pages/ur/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/ur/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? 
+
+[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys.
+
+## 2. How do I create an API Key?
+
+To create an API key, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key.
+
+## 3. Can I create multiple API Keys?
+
+Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/).
+
+## 4. How do I restrict a domain for an API Key?
+
+After creating an API Key, in the Security section, you can define the domains that can query a specific API Key.
+
+## 5. Can I transfer my subgraph to another owner?
+
+Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'.
+
+Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred.
+
+## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use?
+
+You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio.
+
+Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries, via the new API key, are paid queries as any other on the network.
diff --git a/website/src/pages/ur/subgraphs/_meta-titles.json b/website/src/pages/ur/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/ur/subgraphs/_meta-titles.json +++ b/website/src/pages/ur/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/ur/subgraphs/_meta.js b/website/src/pages/ur/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/ur/subgraphs/_meta.js +++ b/website/src/pages/ur/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/ur/subgraphs/best-practices/_meta.js b/website/src/pages/ur/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/ur/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/ur/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data:
+
+```typescript
+import { Address } from '@graphprotocol/graph-ts'
+import { ERC20, Transfer } from '../generated/ERC20/ERC20'
+import { TokenTransaction } from '../generated/schema'
+
+export function handleTransfer(event: Transfer): void {
+  let transaction = new TokenTransaction(event.transaction.hash.toHex())
+
+  // Bind the ERC20 contract instance to the given address:
+  let instance = ERC20.bind(event.address)
+
+  // Retrieve pool information via eth_call
+  let poolInfo = instance.getPoolInfo(event.params.to)
+
+  transaction.pool = poolInfo.toHexString()
+  transaction.from = event.params.from.toHexString()
+  transaction.to = event.params.to.toHexString()
+  transaction.value = event.params.value
+
+  transaction.save()
+}
+```
+
+This is functional; however, it is not ideal as it slows down our subgraph’s indexing.
+
+## How to Eliminate `eth_calls`
+
+Ideally, the smart contract should be updated to emit all necessary data within events.
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/ur/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/ur/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ur/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/best-practices/pruning.mdx b/website/src/pages/ur/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: `: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/best-practices/timeseries.mdx b/website/src/pages/ur/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/ur/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/_meta.js b/website/src/pages/ur/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/ur/subgraphs/cookbook/_meta.js +++ b/website/src/pages/ur/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 780d7ad3f827..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### جائزہ - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## اضافی وسائل - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/pruning.mdx b/website/src/pages/ur/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx b/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 5049667a8d5c..000000000000 --- a/website/src/pages/ur/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## جائزہ - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -مثال: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -مثال: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -مثال: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -مثال: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/ur/subgraphs/developing/deploying/_meta.js b/website/src/pages/ur/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/ur/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/ur/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 424c64245d7d..000000000000 --- a/website/src/pages/ur/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: سب گراف سٹوڈیو کے اکثر پوچھے گئے سوالات ---- - -## 1. سب گراف سٹوڈیو کیا ہے؟ - -[سب گراف سٹوڈیو](https://thegraph.com/studio/) سب گراف اور API کیز بنانے، ان کا نظم کرنے اور شائع کرنے کے لیے ایک ڈیپ ہے. - -## 2. میں ایک API کلید کیسے بنا سکتا ہوں؟ - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. کیا میں ایک سے زیادہ API کلیدیں بنا سکتا ہوں؟ - -جی ہاں! آپ مختلف پروجیکٹس میں استعمال کرنے کے لیے متعدد API کلیدیں بنا سکتے ہیں۔ لنک [یہاں](https://thegraph.com/studio/apikeys/) دیکھیں. - -## 4. میں API کلید کے لیے ڈومین کو کیسے محدود کروں؟ - -ایک API کلید بنانے کے بعد، سیکورٹی سیکشن میں، آپ ان ڈومینز کی وضاحت کر سکتے ہیں جو ایک مخصوص API کلید سے استفسار کر سکتے ہیں. - -## 5. کیا میں اپنا سب گراف کسی دوسرے مالک کو منتقل کر سکتا ہوں؟ - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. 
You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -نوٹ کریں کہ ایک بار منتقل ہونے کے بعد آپ سٹوڈیو میں سب گراف کو دیکھنے یا اس میں ترمیم کرنے کے قابل نہیں رہیں گے. - -## 6. اگر میں اس سب گراف کا ڈویلپر نہیں ہوں جسے میں استعمال کرنا چاہتا ہوں تو میں سب گراف کے لیے کیوری کے URLs کیسے تلاش کروں؟ - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -یاد رکھیں کہ آپ ایک API کلید بنا سکتے ہیں اور نیٹ ورک پر شائع ہونے والے کسی بھی سب گراف سے کیوری کر سکتے ہیں، چاہے آپ خود ایک سب گراف بناتے ہوں۔ نئی API کلید کے ذریعے یہ کیوریز، نیٹ ورک پر کسی دوسرے کی طرح ادائیگی کے سوالات ہیں. diff --git a/website/src/pages/ur/subgraphs/developing/publishing/_meta.js b/website/src/pages/ur/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/ur/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/ur/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/ur/subgraphs/querying/_meta.js b/website/src/pages/ur/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/ur/subgraphs/querying/_meta.js +++ b/website/src/pages/ur/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 
'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/vi/resources/_meta-titles.json b/website/src/pages/vi/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/vi/resources/_meta-titles.json +++ b/website/src/pages/vi/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/vi/resources/_meta.js b/website/src/pages/vi/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/vi/resources/_meta.js +++ b/website/src/pages/vi/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/vi/resources/release-notes/_meta.js b/website/src/pages/vi/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/vi/resources/release-notes/_meta.js rename to website/src/pages/vi/resources/migration-guides/_meta.js diff --git a/website/src/pages/vi/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/vi/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/vi/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/vi/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/vi/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/vi/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index 69c36218d8af..000000000000 --- a/website/src/pages/vi/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: Hướng dẫn Di chuyển AssemblyScript ---- - -Cho đến nay, các subgraph đang sử dụng một trong các [phiên bản đầu tiên của AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). 
Cuối cùng, chúng tôi đã thêm hỗ trợ cho [bản mới nhất hiện có](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 - -Điều đó sẽ cho phép các nhà phát triển subgrap sử dụng các tính năng mới hơn của ngôn ngữ AS và thư viện chuẩn. - -Hướng dẫn này có thể áp dụng cho bất kỳ ai sử dụng `graph-cli`/`graph-ts` dưới phiên bản `0.22.0`. Nếu bạn đã ở phiên bản cao hơn (hoặc bằng) với phiên bản đó, bạn đã sử dụng phiên bản`0.19.10` của AssemblyScript 🙂 - -> Lưu ý: Kể từ `0.24.0`, `graph-node` có thể hỗ trợ cả hai phiên bản, tùy thuộc vào `apiVersion` được chỉ định trong tệp kê khai subgraph. - -## Các đặc điểm - -### Chức năng mới - -- `TypedArray`s bây giờ có thể được xây dựng từ `ArrayBuffer`s bằng cách sử dụng [phương pháp tĩnh `wrap` mới](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- Các chức năng thư viện tiêu chuẩn mới: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Đã thêm hỗ trợ cho x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Đã thêm `StaticArray`, một biến thể mảng hiệu quả hơn ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Đã thêm `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Đã thực hiện đối số `radix` trên `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Đã thêm hỗ trợ cho dấu phân cách trong các ký tự dấu phẩy động ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Đã thêm hỗ trợ cho các chức năng hạng nhất ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Thực hiện `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Đã thêm hỗ trợ cho chuỗi ký tự mẫu ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Thêm `encodeURI(Component)` và `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Thêm `toString`, `toDateString` và `toTimeString` vào `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Thêm `toUTCString` cho `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### Tối ưu hóa - -- Các chức năng `Math` như `exp`, `exp2`, `log`, `log2` và `pow` đã được thay thế bằng các biến thể nhanh hơn ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Tối ưu hóa một chút `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Lưu vào bộ nhớ cache các truy cập trường khác trong std Map và Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Tối ưu hóa cho sức mạnh của hai trong `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) - -### Khác - -- Kiểu của một ký tự mảng bây giờ có thể được suy ra từ nội dung của nó ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Đã cập nhật stdlib thành Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## Làm thế nào để nâng cấp? - -1. Thay đổi `apiVersion` ánh xạ của bạn trong `subgraph.yaml` thành `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. 
Cập nhật `graph-cli` bạn đang sử dụng thành phiên bản `latest` bằng cách chạy: - -```bash -# nếu bạn đã cài đặt nó trên toàn cầu -npm install --global @graphprotocol/graph-cli@latest - -# hoặc trong subgraph của bạn nếu bạn có nó như một phụ thuộc của nhà phát triển -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. Làm tương tự đối với `graph-ts`, nhưng thay vì cài đặt trên toàn cầu, hãy lưu nó trong các phần phụ thuộc chính của bạn: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. Làm theo phần còn lại của hướng dẫn để sửa các thay đổi về lỗi ngôn ngữ. -5. Chạy `codegen` và `deploy` lại. - -## Thay đổi đột phá - -### Vô hiệu - -Trên phiên bản AssemblyScript cũ hơn, bạn có thể tạo mã như sau: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -Tuy nhiên, trên phiên bản mới hơn, vì giá trị là nullable, nó yêu cầu bạn kiểm tra, như sau: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -Hoặc buộc nó như thế này: - -```typescript -let maybeValue = load()! // breaks in runtime if value is null - -maybeValue.aMethod() -``` - -Nếu bạn không chắc nên chọn cái nào, chúng tôi khuyên bạn nên luôn sử dụng phiên bản an toàn. Nếu giá trị không tồn tại, bạn có thể chỉ muốn thực hiện câu lệnh if sớm với trả về trong trình xử lý subgraph của bạn. - -### Variable Shadowing (Che khuất Biến) - -Trước khi bạn có thể thực hiện [che biến](https://en.wikipedia.org/wiki/Variable_shadowing) và mã như thế này sẽ hoạt động: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -Tuy nhiên, bây giờ điều này không còn khả thi nữa và trình biên dịch trả về lỗi này: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -Bạn sẽ cần đổi tên các biến trùng lặp của mình nếu bạn có che biến. 
- -### So sánh Null - -Bằng cách thực hiện nâng cấp trên subgraph của bạn, đôi khi bạn có thể gặp các lỗi như sau: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -Để giải quyết, bạn có thể chỉ cần thay đổi câu lệnh `if` thành một cái gì đó như sau: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -Điều tương tự cũng áp dụng nếu bạn đang làm != Thay vì ==. - -### Ép kiểu (Casting) - -Cách phổ biến để thực hiện ép kiểu trước đây là chỉ sử dụng từ khóa `as`, như sau: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -Tuy nhiên, điều này chỉ hoạt động trong hai trường hợp: - -- Ép kiểu nguyên bản (giữa các kiểu như `u8`, `i32`, `bool`; ví dụ: `let b: isize = 10; b as usize`); -- Upcasting về kế thừa lớp (lớp con → lớp cha) (subclass → superclass) - -Các ví dụ: - -```typescript -// primitive casting (ép kiểu nguyên bản) -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -Có hai trường hợp mà bạn có thể muốn ép kiểu, nhưng việc sử dụng `as`/`var` **không an toàn**: - -- Downcasting về kế thừa lớp (lớp con → lớp cha) (subclass → superclass) -- Giữa hai loại chia sẻ lớp cha - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -Đối với những trường hợp đó, bạn có 
thể sử dụng hàm `changetype`: - -```typescript -// downcasting về kế thừa lớp -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -Nếu bạn chỉ muốn loại bỏ khả năng vô hiệu, bạn có thể tiếp tục sử dụng toán tử `as` (hoặc `variable`), nhưng hãy đảm bảo rằng bạn biết rằng giá trị không được rỗng (null), nếu không nó sẽ bị vỡ. - -```typescript -// loại bỏ khả năng vô hiệu -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -Đối với trường hợp vô hiệu, chúng tôi khuyên bạn nên xem xét [tính năng kiểm tra khả năng vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), nó sẽ giúp mã của bạn sạch hơn 🙂 - -Ngoài ra, chúng tôi đã thêm một vài phương thức tĩnh trong một số kiểu để dễ dàng ép kiểu, chúng là: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### Kiểm tra tính vô hiệu với quyền truy cập thuộc tính - -Để sử dụng [tính năng kiểm tra tính vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), bạn có thể sử dụng câu lệnh `if` hoặc câu lệnh ba toán tử (`?` and `:`) như thế này: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? 
something : 'else' - -// hoặc - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -Tuy nhiên, điều đó chỉ hoạt động khi bạn đang thực hiện `if` / ternary trên một biến, không phải trên quyền truy cập thuộc tính, như sau: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // không biên dịch -``` - -Đầu ra lỗi này: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -Để khắc phục sự cố này, bạn có thể tạo một biến cho quyền truy cập thuộc tính đó để trình biên dịch có thể thực hiện phép thuật kiểm tra tính nullability: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // biên dịch tốt :) -``` - -### Quá tải toán tử với quyền truy cập thuộc tính - -Nếu bạn cố gắng tính tổng (ví dụ) một kiểu nullable (từ quyền truy cập thuộc tính) với một kiểu không thể nullable, trình biên dịch AssemblyScript thay vì đưa ra cảnh báo lỗi thời gian biên dịch rằng một trong các giá trị là nullable, nó chỉ biên dịch âm thầm, tạo cơ hội để mã bị phá vỡ trong thời gian chạy. - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... 
- } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -Chúng tôi đã giải quyết vấn đề trên trình biên dịch AssemblyScript cho vấn đề này, nhưng hiện tại nếu bạn thực hiện các loại hoạt động này trong ánh xạ subgraph của mình, bạn nên thay đổi chúng để thực hiện kiểm tra rỗng trước nó. - -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### Khởi tạo giá trị - -Nếu bạn có bất kỳ mã nào như thế này: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -Nó sẽ biên dịch nhưng bị hỏng trong thời gian chạy, điều đó xảy ra vì giá trị chưa được khởi tạo, vì vậy hãy đảm bảo rằng subgraph của bạn đã khởi tạo các giá trị của chúng, như sau: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -Ngoài ra, nếu bạn có thuộc tính nullable trong thực thể GraphQL, như sau: - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt -} -``` - -Và bạn có mã tương tự như sau: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -Bạn cần đảm bảo khởi tạo giá trị `total.amount`, bởi vì nếu bạn cố gắng truy cập như ở dòng cuối cùng cho tổng, nó sẽ bị lỗi. 
Vì vậy, bạn có thể khởi tạo nó trước: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -Hoặc bạn chỉ có thể thay đổi lược đồ GraphQL của mình để không sử dụng kiểu nullable cho thuộc tính này, sau đó chúng tôi sẽ khởi tạo nó bằng 0 ở bước `codegen` 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! -} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### Khởi tạo thuộc tính lớp - -Nếu bạn xuất bất kỳ lớp nào có thuộc tính là các lớp khác (do bạn hoặc thư viện chuẩn khai báo) như thế này: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -Trình biên dịch sẽ bị lỗi vì bạn cần thêm bộ khởi tạo cho các thuộc tính là các lớp hoặc thêm toán tử `!`: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### Array initialization - -The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: - -``` -ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is 
holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -To actually push at the beginning you should either, initialize the `Array` with size zero, like this: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // ["something"] -``` - -Or you should mutate it via index: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### GraphQL schema - -This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. - -Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -You'll have to add an `!` to the member of the List type, like this: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). 
- -### Khác - -- Căn chỉnh `Map#set` và `Set#add` với thông số kỹ thuật, trả về `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Các lớp được khởi tạo từ các ký tự đối tượng không còn có thể xác định một phương thức khởi tạo nữa ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Kết quả của phép toán nhị phân `**` bây giờ là số nguyên mẫu số chung nếu cả hai toán hạng đều là số nguyên. Trước đây, kết quả là một float như thể đang gọi `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Ép buộc `NaN` thành `false` khi ép kiểu thành `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- Khi dịch chuyển một giá trị số nguyên nhỏ của kiểu `i8`/`u8` hoặc `i16`/`u16`, chỉ 3 bit tương ứng 4 bit ít quan trọng nhất của giá trị RHS ảnh hưởng đến kết quả, tương tự như kết quả của một `i32.shl` chỉ bị ảnh hưởng bởi 5 bit ít quan trọng nhất của giá trị RHS. 
Ví dụ: `someI8 << 8` trước đây đã tạo ra giá trị `0`, nhưng bây giờ tạo ra`someI8` do che dấu RHS là `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Sửa lỗi so sánh chuỗi quan hệ khi kích thước khác nhau ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/vi/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/vi/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 4d909e8970a8..000000000000 --- a/website/src/pages/vi/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,538 +0,0 @@ ---- -title: GraphQL Validations migration guide ---- - -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). - -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. - -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. - -It will also ensure determinism of query responses, a key requirement on The Graph Network. - -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. - -To be compliant with those validations, please follow the migration guide. - -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. - -## Migration guide - -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. 
Testing your queries against this endpoint will help you find the issues in your queries. - -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. - -## Migration CLI tool - -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** - -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. - -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. - -### **Getting started** - -You can run the tool as follows: - -```bash -npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**Notes:** - -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). - -### CLI output - -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). 
- -## Run your local queries against the preview schema - -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. - -You can try out queries by sending them to: - -- `https://api-next.thegraph.com/subgraphs/id/` - -or - -- `https://api-next.thegraph.com/subgraphs/name//` - -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. - -## How to solve issues - -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. - -### GraphQL variables, operations, fragments, or arguments must be unique - -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. - -A GraphQL operation is only valid if it does not contain any ambiguity. - -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. - -Here's an example of a few invalid operations that violates these rules: - -**Duplicate Query name (#UniqueOperationNamesRule)** - -```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name -query myData { - id -} - -query myData { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id -} - -query myData2 { - # rename the second query - name -} -``` - -**Duplicate Fragment name (#UniqueFragmentNamesRule)** - -```graphql -# The following operation violated the UniqueFragmentName -# rule. 
-query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_Solution:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**Duplicate variable name (#UniqueVariableNamesRule)** - -```graphql -# The following operation violates the UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_Solution:_ - -```graphql -query myData($id: String) { - # keep the relevant variable (here: `$id: String`) - id - ...MyFields -} -``` - -**Duplicate argument name (#UniqueArgument)** - -```graphql -# The following operation violated the UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_Solution:_ - -```graphql -query myData($id: ID!) { - userById(id: $id) { - id - } -} -``` - -**Duplicate anonymous query (#LoneAnonymousOperationRule)** - -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: - -```graphql -# This will fail if executed together in -# a single operation with the following two queries: -query { - someField -} - -query { - otherField -} -``` - -_Solution:_ - -```graphql -query { - someField - otherField -} -``` - -Or name the two queries: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### Overlapping Fields - -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. - -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
- -Here are a few examples of invalid operations that violate this rule: - -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. -query { - dogs { - name: nickname - name - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # alias the original `name` field - } -} -``` - -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** - -```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_Solution:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: - -```graphql -query { - # Eventually, we have two "x" definitions, pointing - # to different fields! - ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) - -### Unused Variables or Fragments - -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. - -Here are a few examples for GraphQL operations that violates these rules: - -**Unused variable** (#NoUnusedVariablesRule) - -```graphql -# Invalid, because $someVar is never used. 
-query something($someVar: String) { - someData -} -``` - -_Solution:_ - -```graphql -query something { - someData -} -``` - -**Unused Fragment** (#NoUnusedFragmentsRule) - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -fragment AllFields { # unused :( - name - age -} -``` - -_Solution:_ - -```graphql -# Invalid, because fragment AllFields is never used. -query something { - someData -} - -# remove the `AllFields` fragment -``` - -### Invalid or missing Selection-Set (#ScalarLeafsRule) - -Also, a GraphQL field selection is only valid if the following is validated: - -- An object field must-have selection set specified. -- An edge field (scalar, enum) must not have a selection set specified. - -Here are a few examples of violations of these rules with the following Schema: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**Invalid Selection-Set** - -```graphql -query { - user { - id { # Invalid, because "id" is of type ID and does not have sub-fields - - } - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - } -} -``` - -**Missing Selection-Set** - -```graphql -query { - user { - id - image # `image` requires a Selection-Set for sub-fields! - } -} -``` - -_Solution:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### Incorrect Arguments values (#VariablesInAllowedPositionRule) - -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. - -Here are a few examples of invalid operations that violate these rules: - -```graphql -query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. - purpose(name: 1) { - id - } -} - -# This might also happen when an incorrect variable is defined: - -query purposes($name: Int!) 
{ - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` - purpose(name: $name) { - id - } -} -``` - -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) - -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. - -Those unknown references must be fixed: - -- rename if it was a typo -- otherwise, remove - -### Fragment: invalid spread or definition - -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** - -A Fragment cannot be spread on a non-applicable type. - -Example, we cannot apply a `Cat` fragment to the `Dog` type: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** - -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. - -The following examples are invalid, since defining fragments on scalars is invalid. - -```graphql -fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` is not a subtype of `Dog` - somethingElse - } -} -``` - -### Directives usage - -**Directive cannot be used at this location (#KnownDirectivesRule)** - -Only GraphQL directives (`@...`) supported by The Graph API can be used. - -Here is an example with The GraphQL supported directives: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_Note: `@stream`, `@live`, `@defer` are not supported._ - -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** - -The directives supported by The Graph can only be used once per location. 
- -The following is invalid (and redundant): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/vi/resources/subgraph-studio-faq.mdx b/website/src/pages/vi/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/vi/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API key, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. 
You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. diff --git a/website/src/pages/vi/subgraphs/_meta-titles.json b/website/src/pages/vi/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/vi/subgraphs/_meta-titles.json +++ b/website/src/pages/vi/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/vi/subgraphs/_meta.js b/website/src/pages/vi/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/vi/subgraphs/_meta.js +++ b/website/src/pages/vi/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/vi/subgraphs/best-practices/_meta.js b/website/src/pages/vi/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/vi/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/vi/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: 
Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```solidity +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/vi/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/vi/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/vi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/best-practices/pruning.mdx b/website/src/pages/vi/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of Blocks to Retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of Blocks to Retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of Blocks to Retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/best-practices/timeseries.mdx b/website/src/pages/vi/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/vi/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/_meta.js b/website/src/pages/vi/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/vi/subgraphs/cookbook/_meta.js +++ b/website/src/pages/vi/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index ddf32cc35aa6..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### Tổng quan - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## Additional Resources - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/pruning.mdx b/website/src/pages/vi/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx b/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 4a55ac2bcd5a..000000000000 --- a/website/src/pages/vi/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## Tổng quan - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/vi/subgraphs/developing/deploying/_meta.js b/website/src/pages/vi/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/vi/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/vi/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/vi/subgraphs/developing/publishing/_meta.js b/website/src/pages/vi/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/vi/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/vi/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/vi/subgraphs/querying/_meta.js b/website/src/pages/vi/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/vi/subgraphs/querying/_meta.js +++ b/website/src/pages/vi/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 
'subgraph-id-vs-deployment-id': '', diff --git a/website/src/pages/zh/resources/_meta-titles.json b/website/src/pages/zh/resources/_meta-titles.json index 8ac14af7627a..f5971e95a8f6 100644 --- a/website/src/pages/zh/resources/_meta-titles.json +++ b/website/src/pages/zh/resources/_meta-titles.json @@ -1,4 +1,4 @@ { "roles": "Additional Roles", - "release-notes": "Release Notes & Upgrade Guides" + "migration-guides": "Migration Guides" } diff --git a/website/src/pages/zh/resources/_meta.js b/website/src/pages/zh/resources/_meta.js index 3c0862ea1859..66cf79a52b51 100644 --- a/website/src/pages/zh/resources/_meta.js +++ b/website/src/pages/zh/resources/_meta.js @@ -5,5 +5,6 @@ export default { tokenomics: '', benefits: '', roles: titles.roles, - 'release-notes': titles['release-notes'], + 'migration-guides': titles['migration-guides'], + 'subgraph-studio-faq': '', } diff --git a/website/src/pages/zh/resources/release-notes/_meta.js b/website/src/pages/zh/resources/migration-guides/_meta.js similarity index 100% rename from website/src/pages/zh/resources/release-notes/_meta.js rename to website/src/pages/zh/resources/migration-guides/_meta.js diff --git a/website/src/pages/zh/resources/migration-guides/assemblyscript-migration-guide.mdx b/website/src/pages/zh/resources/migration-guides/assemblyscript-migration-guide.mdx new file mode 100644 index 000000000000..85f6903a6c69 --- /dev/null +++ b/website/src/pages/zh/resources/migration-guides/assemblyscript-migration-guide.mdx @@ -0,0 +1,524 @@ +--- +title: AssemblyScript Migration Guide +--- + +Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 + +That will enable subgraph developers to use newer features of the AS language and standard library. 
+ +This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 + +> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. + +## Features + +### New functionality + +- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings 
([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) + +### Optimizations + +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) + +### Other + +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) + +## How to upgrade? + +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: + +```yaml +... +dataSources: + ... + mapping: + ... + apiVersion: 0.0.6 + ... +``` + +2. Update the `graph-cli` you're using to the `latest` version by running: + +```bash +# if you have it globally installed +npm install --global @graphprotocol/graph-cli@latest + +# or in your subgraph if you have it as a dev dependency +npm install --save-dev @graphprotocol/graph-cli@latest +``` + +3. 
Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: + +```bash +npm install --save @graphprotocol/graph-ts@latest +``` + +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. + +## Breaking changes + +### Nullability + +On the older version of AssemblyScript, you could create code like this: + +```typescript +function load(): Value | null { ... } + +let maybeValue = load(); +maybeValue.aMethod(); +``` + +However on the newer version, because the value is nullable, it requires you to check, like this: + +```typescript +let maybeValue = load() + +if (maybeValue) { + maybeValue.aMethod() // `maybeValue` is not null anymore +} +``` + +Or force it like this: + +```typescript +let maybeValue = load()! // breaks in runtime if value is null + +maybeValue.aMethod() +``` + +If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in your subgraph handler. + +### Variable Shadowing + +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: + +```typescript +let a = 10 +let b = 20 +let a = a + b +``` + +However now this isn't possible anymore, and the compiler returns this error: + +```typescript +ERROR TS2451: Cannot redeclare block-scoped variable 'a' + + let a = a + b; + ~~~~~~~~~~~~~ +in assembly/index.ts(4,3) +``` + +You'll need to rename your duplicate variables if you had variable shadowing. + +### Null Comparisons + +By doing the upgrade on your subgraph, sometimes you might get errors like these: + +```typescript +ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. 
+ if (decimals == null) { + ~~~~ + in src/mappings/file.ts(41,21) +``` + +To solve you can simply change the `if` statement to something like this: + +```typescript + if (!decimals) { + + // or + + if (decimals === null) { +``` + +The same applies if you're doing != instead of ==. + +### Casting + +The common way to do casting before was to just use the `as` keyword, like this: + +```typescript +let byteArray = new ByteArray(10) +let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +``` + +However this only works in two scenarios: + +- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); +- Upcasting on class inheritance (subclass → superclass) + +Examples: + +```typescript +// primitive casting +let a: usize = 10 +let b: isize = 5 +let c: usize = a + (b as usize) +``` + +```typescript +// upcasting on class inheritance +class Bytes extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // same as: bytes as Uint8Array +``` + +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: + +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +// uint8Array // breaks in runtime :( +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +// bytes // breaks in runtime :( +``` + +For those cases, you can use the `changetype` function: + +```typescript +// downcasting on class inheritance +class Bytes extends Uint8Array {} + +let uint8Array = new Uint8Array(2) +changetype(uint8Array) // works :) +``` + +```typescript +// between two types that share a superclass +class Bytes extends Uint8Array {} +class ByteArray extends Uint8Array {} + +let bytes = new Bytes(2) +changetype(bytes) // works 
:) +``` + +If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. + +```typescript +// remove nullability +let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null + +if (previousBalance != null) { + return previousBalance as AccountBalance // safe remove null +} + +let newBalance = new AccountBalance(balanceId) +``` + +For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 + +Also we've added a few more static methods in some types to ease casting, they are: + +- Bytes.fromByteArray +- Bytes.fromUint8Array +- BigInt.fromByteArray +- ByteArray.fromBigInt + +### Nullability check with property access + +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: + +```typescript +let something: string | null = 'data' + +let somethingOrElse = something ? something : 'else' + +// or + +let somethingOrElse + +if (something) { + somethingOrElse = something +} else { + somethingOrElse = 'else' +} +``` + +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +``` + +Which outputs this error: + +```typescript +ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. + + let somethingOrElse: string = container.data ? 
container.data : "else"; + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``` + +To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: + +```typescript +class Container { + data: string | null +} + +let container = new Container() +container.data = 'data' + +let data = container.data + +let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +``` + +### Operator overloading with property access + +If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. + +```typescript +class BigInt extends Uint8Array { + @operator('+') + plus(other: BigInt): BigInt { + // ... + } +} + +class Wrapper { + public constructor(public n: BigInt | null) {} +} + +let x = BigInt.fromI32(2) +let y: BigInt | null = null + +x + y // give compile time error about nullability + +let wrapper = new Wrapper(y) + +wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +``` + +We've opened an issue on the AssemblyScript compiler for this, but for now if you do these kinds of operations in your subgraph mappings, you should change them to do a null check before it. 
+ +```typescript +let wrapper = new Wrapper(y) + +if (!wrapper.n) { + wrapper.n = BigInt.fromI32(0) +} + +wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +``` + +### Value initialization + +If you have any code like this: + +```typescript +var value: Type // null +value.x = 10 +value.y = 'content' +``` + +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized its values, like this: + +```typescript +var value = new Type() // initialized +value.x = 10 +value.y = 'content' +``` + +Also if you have nullable properties in a GraphQL entity, like this: + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt +} +``` + +And you have code similar to this: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +You'll need to make sure to initialize the `total.amount` value, because if you try to access it like in the last line for the sum, it will crash. So you either initialize it first: + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') + total.amount = BigInt.fromI32(0) +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 + +```graphql +type Total @entity { + id: Bytes! + amount: BigInt! 
+} +``` + +```typescript +let total = Total.load('latest') + +if (total === null) { + total = new Total('latest') // already initializes non-nullable properties +} + +total.amount = total.amount + BigInt.fromI32(1) +``` + +### Class property initialization + +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: + +```typescript +class Thing {} + +export class Something { + value: Thing +} +``` + +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: + +```typescript +export class Something { + constructor(public value: Thing) {} +} + +// or + +export class Something { + value: Thing + + constructor(value: Thing) { + this.value = value + } +} + +// or + +export class Something { + value!: Thing +} +``` + +### Array initialization + +The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( +``` + +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: + +``` +ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type +``` + +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: + +```typescript +let arr = new Array(0) // [] + +arr.push('something') // ["something"] 
+``` + +Or you should mutate it via index: + +```typescript +let arr = new Array(5) // ["", "", "", "", ""] + +arr[0] = 'something' // ["something", "", "", "", ""] +``` + +### GraphQL schema + +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. + +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something]! # no longer valid +} +``` + +You'll have to add an `!` to the member of the List type, like this: + +```graphql +type Something @entity { + id: Bytes! +} + +type MyEntity @entity { + id: Bytes! + invalidField: [Something!]! # valid +} +``` + +This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). + +### Other + +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The result of a `**` binary operation is now the common denominator integer if both operands are integers. 
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/zh/resources/migration-guides/graphql-validations-migration-guide.mdx b/website/src/pages/zh/resources/migration-guides/graphql-validations-migration-guide.mdx new file mode 100644 index 000000000000..29fed533ef8c --- /dev/null +++ b/website/src/pages/zh/resources/migration-guides/graphql-validations-migration-guide.mdx @@ -0,0 +1,538 @@ +--- +title: GraphQL Validations Migration Guide +--- + +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). + +Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. + +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. + +It will also ensure determinism of query responses, a key requirement on The Graph Network. + +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. 
+ +To be compliant with those validations, please follow the migration guide. + +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. + +## Migration guide + +You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. + +> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. + +## Migration CLI tool + +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** + +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. + +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. + +### **Getting started** + +You can run the tool as follows: + +```bash +npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql +``` + +**Notes:** + +- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** +- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). + +### CLI output + +The [`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) CLI tool will output any GraphQL operations errors as follows: + +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) + +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). + +## Run your local queries against the preview schema + +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. + +You can try out queries by sending them to: + +- `https://api-next.thegraph.com/subgraphs/id/` + +or + +- `https://api-next.thegraph.com/subgraphs/name//` + +To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. + +## How to solve issues + +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. + +### GraphQL variables, operations, fragments, or arguments must be unique + +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. + +A GraphQL operation is only valid if it does not contain any ambiguity. + +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. 
+ +Here's an example of a few invalid operations that violates these rules: + +**Duplicate Query name (#UniqueOperationNamesRule)** + +```graphql +# The following operation violated the UniqueOperationName +# rule, since we have a single operation with 2 queries +# with the same name +query myData { + id +} + +query myData { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id +} + +query myData2 { + # rename the second query + name +} +``` + +**Duplicate Fragment name (#UniqueFragmentNamesRule)** + +```graphql +# The following operation violated the UniqueFragmentName +# rule. +query myData { + id + ...MyFields +} + +fragment MyFields { + metadata +} + +fragment MyFields { + name +} +``` + +_Solution:_ + +```graphql +query myData { + id + ...MyFieldsName + ...MyFieldsMetadata +} + +fragment MyFieldsMetadata { # assign a unique name to fragment + metadata +} + +fragment MyFieldsName { # assign a unique name to fragment + name +} +``` + +**Duplicate variable name (#UniqueVariableNamesRule)** + +```graphql +# The following operation violates the UniqueVariables +query myData($id: String, $id: Int) { + id + ...MyFields +} +``` + +_Solution:_ + +```graphql +query myData($id: String) { + # keep the relevant variable (here: `$id: String`) + id + ...MyFields +} +``` + +**Duplicate argument name (#UniqueArgument)** + +```graphql +# The following operation violated the UniqueArguments +query myData($id: ID!) { + userById(id: $id, id: "1") { + id + } +} +``` + +_Solution:_ + +```graphql +query myData($id: ID!) 
{ + userById(id: $id) { + id + } +} +``` + +**Duplicate anonymous query (#LoneAnonymousOperationRule)** + +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: + +```graphql +# This will fail if executed together in +# a single operation with the following two queries: +query { + someField +} + +query { + otherField +} +``` + +_Solution:_ + +```graphql +query { + someField + otherField +} +``` + +Or name the two queries: + +```graphql +query FirstQuery { + someField +} + +query SecondQuery { + otherField +} +``` + +### Overlapping Fields + +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. + +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. + +Here are a few examples of invalid operations that violate this rule: + +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Aliasing fields might cause conflicts, either with +# other aliases or other fields that exist on the +# GraphQL schema. +query { + dogs { + name: nickname + name + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + name: nickname + originalName: name # alias the original `name` field + } +} +``` + +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** + +```graphql +# Different arguments might lead to different data, +# so we can't assume the fields will be the same. 
+query { + dogs { + doesKnowCommand(dogCommand: SIT) + doesKnowCommand(dogCommand: HEEL) + } +} +``` + +_Solution:_ + +```graphql +query { + dogs { + knowsHowToSit: doesKnowCommand(dogCommand: SIT) + knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) + } +} +``` + +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: + +```graphql +query { + # Eventually, we have two "x" definitions, pointing + # to different fields! + ...A + ...B +} + +fragment A on Type { + x: a +} + +fragment B on Type { + x: b +} +``` + +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: + +```graphql +fragment mergeSameFieldsWithSameDirectives on Dog { + name @include(if: true) + name @include(if: false) +} +``` + +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) + +### Unused Variables or Fragments + +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. + +Here are a few examples for GraphQL operations that violates these rules: + +**Unused variable** (#NoUnusedVariablesRule) + +```graphql +# Invalid, because $someVar is never used. +query something($someVar: String) { + someData +} +``` + +_Solution:_ + +```graphql +query something { + someData +} +``` + +**Unused Fragment** (#NoUnusedFragmentsRule) + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +fragment AllFields { # unused :( + name + age +} +``` + +_Solution:_ + +```graphql +# Invalid, because fragment AllFields is never used. +query something { + someData +} + +# remove the `AllFields` fragment +``` + +### Invalid or missing Selection-Set (#ScalarLeafsRule) + +Also, a GraphQL field selection is only valid if the following is validated: + +- An object field must-have selection set specified. 
+- An edge field (scalar, enum) must not have a selection set specified. + +Here are a few examples of violations of these rules with the following Schema: + +```graphql +type Image { + url: String! +} + +type User { + id: ID! + avatar: Image! +} + +type Query { + user: User! +} +``` + +**Invalid Selection-Set** + +```graphql +query { + user { + id { # Invalid, because "id" is of type ID and does not have sub-fields + + } + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + } +} +``` + +**Missing Selection-Set** + +```graphql +query { + user { + id + image # `image` requires a Selection-Set for sub-fields! + } +} +``` + +_Solution:_ + +```graphql +query { + user { + id + image { + src + } + } +} +``` + +### Incorrect Arguments values (#VariablesInAllowedPositionRule) + +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. + +Here are a few examples of invalid operations that violate these rules: + +```graphql +query purposes { + # If "name" is defined as "String" in the schema, + # this query will fail during validation. + purpose(name: 1) { + id + } +} + +# This might also happen when an incorrect variable is defined: + +query purposes($name: Int!) { + # If "name" is defined as `String` in the schema, + # this query will fail during validation, because the + # variable used is of type `Int` + purpose(name: $name) { + id + } +} +``` + +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) + +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. + +Those unknown references must be fixed: + +- rename if it was a typo +- otherwise, remove + +### Fragment: invalid spread or definition + +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** + +A Fragment cannot be spread on a non-applicable type. 
+ +Example, we cannot apply a `Cat` fragment to the `Dog` type: + +```graphql +query { + dog { + ...CatSimple + } +} + +fragment CatSimple on Cat { + # ... +} +``` + +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** + +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. + +The following examples are invalid, since defining fragments on scalars is invalid. + +```graphql +fragment fragOnScalar on Int { + # we cannot define a fragment upon a scalar (`Int`) + something +} + +fragment inlineFragOnScalar on Dog { + ... on Boolean { + # `Boolean` is not a subtype of `Dog` + somethingElse + } +} +``` + +### Directives usage + +**Directive cannot be used at this location (#KnownDirectivesRule)** + +Only GraphQL directives (`@...`) supported by The Graph API can be used. + +Here is an example with The GraphQL supported directives: + +```graphql +query { + dog { + name @include(true) + age @skip(true) + } +} +``` + +_Note: `@stream`, `@live`, `@defer` are not supported._ + +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** + +The directives supported by The Graph can only be used once per location. + +The following is invalid (and redundant): + +```graphql +query { + dog { + name @include(true) @include(true) + } +} +``` diff --git a/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx b/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx deleted file mode 100644 index 622bdeef307e..000000000000 --- a/website/src/pages/zh/resources/release-notes/assemblyscript-migration-guide.mdx +++ /dev/null @@ -1,524 +0,0 @@ ---- -title: AssemblyScript 迁移指南 ---- - -到目前为止,子图一直在使用 [AssemblyScript 的第一个版本](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6) 之一。 最终,我们添加了对[最新版本](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) 的支持! 
🎉 - -这将使子图开发人员能够使用 AS 语言和标准库的更新特性。 - -本指南适用于使用 `0.22.0` 版本以下的 `graph-cli`/`graph-ts` 的任何人。 如果您已经使用了高于(或等于)该版本号的版本,那么您已经在使用 AssemblyScript 的 `0.19.10` 版本 🙂。 - -> 注意:从 `0.24.0` 开始,`graph-node` 可以支持这两个版本,具体取决于子图清单文件中指定的 `apiVersion`。 - -## 特征 - -### 新功能 - -- `TypedArray`s 现在可以使用[新的`wrap`静态方法](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1))基于`ArrayBuffer`s 构建 -- 新的标准库函数: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`和`TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 增加了对 x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2))的支持 -- 添加了 `StaticArray`, 一种更高效的数组变体 ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- 增加了 `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 在`Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1))上实现了`radix` 参数 -- 添加了对浮点文字中的分隔符的支持 ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- 添加了对一级函数的支持 ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- 添加内置函数:`i32/i64/f32/f64.add/sub/mul` ([ v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- 实现了`Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- 添加了对模板文字字符串的支持([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- 添加了`encodeURI(Component)` 和 `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- 将 `toString`、`toDateString` 和 `toTimeString` 添加到 `Date` (\[v0.18.29\](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- 为`Date` 
添加了`toUTCString`([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- 添加 `nonnull/NonNullable` 内置类型([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) - -### 优化 - -- `Math` 函数,例如 `exp`、`exp2`、`log`、`log2` 和 `pow` 已替换为更快的变体 ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 些许优化了`Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- 在 std Map 和 Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) 中缓存更多字段访问 -- 在 `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2))中优化二的幂运算 - -### 其他 - -- 现在可以从数组内容中推断出数组文字的类型([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- 将 stdlib 更新为 Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - -## 如何升级? - -1. 将 `subgraph.yaml` 中的映射 `apiVersion` 更改为 `0.0.6`: - -```yaml -... -dataSources: - ... - mapping: - ... - apiVersion: 0.0.6 - ... -``` - -2. 通过运行以下命令,将您正在使用的 `graph-cli` 更新为 `latest` 版本: - -```bash -# if you have it globally installed -npm install --global @graphprotocol/graph-cli@latest - -# or in your subgraph if you have it as a dev dependency -npm install --save-dev @graphprotocol/graph-cli@latest -``` - -3. 对 `graph-ts` 执行相同的操作,但不是全局安装,而是将其保存在您的主要依赖项中: - -```bash -npm install --save @graphprotocol/graph-ts@latest -``` - -4. 参考指南的其余部分修复语言更改带来的问题。 -5. 再次运行 `codegen` 和 `deploy`。 - -## 重大变化 - -### 可空性 - -在旧版本的 AssemblyScript 上,您可以创建如下代码: - -```typescript -function load(): Value | null { ... } - -let maybeValue = load(); -maybeValue.aMethod(); -``` - -但是在较新的版本中,由于该值可以为空,因此需要您进行检查,如下所示: - -```typescript -let maybeValue = load() - -if (maybeValue) { - maybeValue.aMethod() // `maybeValue` is not null anymore -} -``` - -或者像这样编写代码: - -```typescript -let maybeValue = load()! 
// breaks in runtime if value is null - -maybeValue.aMethod() -``` - -如果您不确定选择哪个,我们建议始终使用安全的方式。 如果该值不存在,您可能只想在您的子图处理程序中,尽早执行一个带有 return 的 if 语句进行检查。 - -### 变量遮蔽 - -在您可以进行 [变量遮蔽](https://en.wikipedia.org/wiki/Variable_shadowing) 之前,这样的代码可以工作: - -```typescript -let a = 10 -let b = 20 -let a = a + b -``` - -但是现在这不可能了,编译器会返回这个错误: - -```typescript -ERROR TS2451: Cannot redeclare block-scoped variable 'a' - - let a = a + b; - ~~~~~~~~~~~~~ -in assembly/index.ts(4,3) -``` - -如果您有变量遮蔽的情况,则需要重命名重名变量。 - -### 空值比较 - -对子图进行升级后,有时您可能会遇到如下错误: - -```typescript -ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. - if (decimals == null) { - ~~~~ - in src/mappings/file.ts(41,21) -``` - -要解决此问题,您只需将 `if` 语句更改为如下所示代码: - -```typescript - if (!decimals) { - - // or - - if (decimals === null) { -``` - -如果您使用 != 而不是 ==,这同样适用。 - -### 强制转换 - -以前,进行强制转换的常用方法是使用 `as`关键字,如下所示: - -```typescript -let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray -``` - -但是,这只适用于两种情况: - -- 原始类型转换(在`u8`, `i32`, `bool`等类型之间; 例如: `let b: isize = 10; b as usize`); -- 在类继承时向上转换(子类 → 超类) - -例子: - -```typescript -// primitive casting -let a: usize = 10 -let b: isize = 5 -let c: usize = a + (b as usize) -``` - -```typescript -// upcasting on class inheritance -class Bytes extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // same as: bytes as Uint8Array -``` - -在两种情况下,您可能希望进行类型转换,但使用 `as`/`var` **并不安全**: - -- 在类继承时向下转换(超类 → 子类) -- 在共享超类的两种类型之间 - -```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -// uint8Array // breaks in runtime :( -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -// bytes // breaks in runtime :( -``` - -对于这些情况,您可以使用 `changetype` 函数: - 
-```typescript -// downcasting on class inheritance -class Bytes extends Uint8Array {} - -let uint8Array = new Uint8Array(2) -changetype(uint8Array) // works :) -``` - -```typescript -// between two types that share a superclass -class Bytes extends Uint8Array {} -class ByteArray extends Uint8Array {} - -let bytes = new Bytes(2) -changetype(bytes) // works :) -``` - -如果您只想去掉可空性,您可以继续使用 `as` 运算符(或 `variable`),但请确保您知道该值不会为空, 否则程序会出现问题。 - -```typescript -// remove nullability -let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null - -if (previousBalance != null) { - return previousBalance as AccountBalance // safe remove null -} - -let newBalance = new AccountBalance(balanceId) -``` - -对于可空性情况,我们建议查看[可空性检查功能](https://www.assemblyscript.org/basics.html#nullability-checks),它会让您的代码更简洁 🙂 - -我们还在某些类型中添加了一些静态方法来简化转换,它们是: - -- Bytes.fromByteArray -- Bytes.fromUint8Array -- BigInt.fromByteArray -- ByteArray.fromBigInt - -### 使用属性访问进行可空性检查 - -要使用 [可空性检查功能](https://www.assemblyscript.org/basics.html#nullability-checks),您可以使用 `if` 语句或三元运算符(`?` 和 `:`),如下所示: - -```typescript -let something: string | null = 'data' - -let somethingOrElse = something ? something : 'else' - -// or - -let somethingOrElse - -if (something) { - somethingOrElse = something -} else { - somethingOrElse = 'else' -} -``` - -但是,这仅在您对变量执行 `if` / 三元组而不是属性访问时才有效,如下所示: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile -``` - -输出此错误: - -```typescript -ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. - - let somethingOrElse: string = container.data ? 
container.data : "else"; - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``` - -要解决此问题,您可以为该属性访问创建一个变量,以便编译器可以执行可空性检查: - -```typescript -class Container { - data: string | null -} - -let container = new Container() -container.data = 'data' - -let data = container.data - -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) -``` - -### 具有属性访问的运算符重载 - -如果您尝试将可空类型(来自属性访问)与不可空类型相加,AssemblyScript 编译器不会给出编译时错误警告其中一个值可以为空,它只是静默编译,这会导致代码在运行时可能出现问题。 - -```typescript -class BigInt extends Uint8Array { - @operator('+') - plus(other: BigInt): BigInt { - // ... - } -} - -class Wrapper { - public constructor(public n: BigInt | null) {} -} - -let x = BigInt.fromI32(2) -let y: BigInt | null = null - -x + y // give compile time error about nullability - -let wrapper = new Wrapper(y) - -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should -``` - -我们为此在 AssemblyScript 编译器上提出了一个 issue,但现在如果您在子图映射中执行此类操作,您应该在之前进行空值检查。 - -```typescript -let wrapper = new Wrapper(y) - -if (!wrapper.n) { - wrapper.n = BigInt.fromI32(0) -} - -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt -``` - -### 值初始化 - -如果您有这样的代码: - -```typescript -var value: Type // null -value.x = 10 -value.y = 'content' -``` - -代码将编译成功,但在运行时会出现问题,这是因为值尚未初始化,因此请确保您的子图已初始化变量的值,如下所示: - -```typescript -var value = new Type() // initialized -value.x = 10 -value.y = 'content' -``` - -此外,如果您在 GraphQL 实体中有可为空的属性,如下所示: - -```graphql -type Total @entity { - id: Id! 
- amount: BigInt -} -``` - -同时,您有类似这样的代码: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -您需要确保初始化 `total.amount` 值,因为如果您尝试像最后一行代码一样求和,程序将崩溃。 所以你要么先初始化它: - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') - total.amount = BigInt.fromI32(0) -} - -total.tokens = total.tokens + BigInt.fromI32(1) -``` - -或者您可以更改您的 GraphQL 模式,不给此属性赋予可为空的类型,然后您在 `codegen` 步骤中将其初始化为零 😉 - -```graphql -type Total @entity { - id: Bytes! - amount: BigInt! -} -``` - -```typescript -let total = Total.load('latest') - -if (total === null) { - total = new Total('latest') // already initializes non-nullable properties -} - -total.amount = total.amount + BigInt.fromI32(1) -``` - -### 类属性初始化 - -如果您导出任何具有其他类(由您或标准库声明)的属性的类,如下所示: - -```typescript -class Thing {} - -export class Something { - value: Thing -} -``` - -编译器会报错,因为您需要为类属性添加初始化程序,或者添加 `!` 运算符: - -```typescript -export class Something { - constructor(public value: Thing) {} -} - -// or - -export class Something { - value: Thing - - constructor(value: Thing) { - this.value = value - } -} - -// or - -export class Something { - value!: Thing -} -``` - -### 数组初始化 - -`Array` 类仍然接受一个数字来初始化列表的长度,但是您应该小心,因为像`.push`的操作实际上会增加大小,而不是添加到开头,例如: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( -``` - -根据您使用的类型,例如可以为空的类型,以及访问它们的方式,您可能会遇到类似下面这样的运行时错误: - -``` -ERRO Handler 由于执行失败而跳过,错误: 映射在 ~ lib/array.ts,第110行,第40列中止,并且带有消息: 如果 array 是漏洞 wasm 反向跟踪,那么 Element type 必须为 null: 0:0x19c4-!~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type -``` - -要想真正在开始的时候推入,你应该将 `Array` 初始化为大小为零,如下所示: - -```typescript -let arr = new Array(0) // [] - -arr.push('something') // 
["something"] -``` - -或者,你可以通过索引对其进行改变: - -```typescript -let arr = new Array(5) // ["", "", "", "", ""] - -arr[0] = 'something' // ["something", "", "", "", ""] -``` - -### GraphQL 模式 - -这不是一个直接的 AssemblyScript 更改,但是您可能需要更新 `schema.Graphql` 文件。 - -现在,您不再能够在类型中定义属于非空列表的字段。如果您有这样的模式: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something]! # no longer valid -} -``` - -您必须向 List 类型的成员添加一个`!` ,如下所示: - -```graphql -type Something @entity { - id: Bytes! -} - -type MyEntity @entity { - id: Bytes! - invalidField: [Something!]! # valid -} -``` - -AssemblyScript 版本之间的可空性差异导致了这种改变, 并且这也与 `src/generated/schema.ts`文件(默认路径,您可能已更改)有关。 - -### 其他 - -- 将 `Map#set` 和 `Set#add` 与规范对齐,返回 `this` (\[v0.9.2\](https://github.com/AssemblyScript /assemblyscript/releases/tag/v0.9.2)) -- 数组不再继承自 ArrayBufferView,并且现在是完全不同的 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 从对象字面初始化的类不能再定义构造函数([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- 如果两个操作数都是整数,则 `**` 二元运算的结果现在是公分母整数。 以前,结果是一个浮点数,就像调用 `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- 在转换为 `bool` 时强制 `NaN` 为 `false` (\[v0.14.9\](https://github.com/AssemblyScript/assemblyscript/releases/tag /v0.14.9)) -- 当移动 `i8`/`u8` 或 `i16`/`u16` 类型的小整数值时,只有 4 个 RHS 值的最低有效位中的 3 个会影响结果,类似于 `i32.shl` 的结果仅受 RHS 值的 5 个最低有效位影响。 示例:`someI8 << 8` 以前生成值 `0`,但现在由于将 RHS 屏蔽为`8 & 7 = 0` (3 比特), 而生成 `someI8`([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- 大小不同时关系字符串比较的错误修复 ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx b/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx deleted file mode 100644 index 8c4d5c52d93a..000000000000 --- 
a/website/src/pages/zh/resources/release-notes/graphql-validations-migration-guide.mdx +++ /dev/null @@ -1,539 +0,0 @@ ---- -title: GraphQL验证迁移指南 ---- - -很快,“graph-节点”将支持[GraphQL验证规范]的100%覆盖率(https://spec.graphql.org/June2018/#sec-验证)。 - -“graph-节点”的早期版本不支持所有验证,并提供了更优雅的响应——因此,在出现歧义的情况下,“graph-节点”会忽略无效的GraphQL操作组件。 - -GraphQL验证支持是即将推出的新功能和Graph网络规模性能的支柱。 - -它还将确保查询响应的确定性,这是Graph网络的一个关键要求。 - -**启用GraphQL验证将中断发送到Graph API的一些现有查询**。 - -为了符合这些验证,请遵循迁移指南。 - -> > ⚠️ 如果您不在验证推出之前迁移查询,它们将返回错误,并可能破坏您的前端/客户端。 - -## 迁移指南 - -您可以使用CLI迁移工具查找GraphQL操作中的任何问题并进行修复。或者,您可以更新GraphQL客户端的端点,以使用`https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`端点。针对此端点测试查询将帮助您发现查询中的问题。 - -> > 如果您使用[GraphQL ESlint],并不是所有的子图都需要迁移(https://the-guild.dev/graphql/eslint/docs)或[GraphQL代码生成器](https://the-guild.dev/graphql/codegen),它们已经确保了您的查询是有效的。 - -## 迁移CLI工具 - -**大多数GraphQL操作错误都可以提前在代码库中找到** - -因此,我们在开发过程中或在CI中为验证GraphQL操作提供了流畅的体验。 - -[`@graphql验证/cli`](https://github.com/saihaj/graphql-validate)是一个简单的CLI工具,有助于根据给定的模式验证GraphQL操作。 - -### **入门** - -您可以按如下方式运行该工具: - -```bash -npx@graphql验证/cli-shttps://api-npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql -``` - -**注意事项:** - -- \-使用适当的值设置或替换$GITHUB_USER、$SUBGRAPH_NAME。类似:[`artblocks/artblocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- \-预览架构URL(https://api-next.thegraph.com/)所提供的是速率严重受限,并且一旦所有用户都迁移到新版本,就将是逐步关闭期。**不要在生产中使用它** -- \-操作在具有以下扩展名的文件中标识[`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-文件加载器)[`.ts`,`.tsx`,`.js`,`jsx`](https://www.graphql-tools.com/docs/schema-loading#code-文件加载程序)(“-o”选项)。 - -### CLI输出 - -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)`CLI工具将输出任何GraphQL操作错误,如下所示: - -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) - -对于每个错误,您都会找到一个描述、文件路径和位置,以及到解决方案示例的链接(请参阅以下部分)。 - -## 根据预览模式运行本地查询 - -我们提供了一个端点`https://api-next.thegraph.com/`它运行一个启用了验证的“graph-节点”版本。 - -您可以尝试将查询发送到: - -- 
`https://api-next.thegraph.com/subgraphs/id/` - -或者 - -- `https://api-next.thegraph.com/subgraphs/name//` - -要处理标记为存在验证错误的查询,可以使用您最喜欢的GraphQL查询工具,如Altair或[GraphiQL](https://cloud.hasura.io/public/graphiql),然后尝试您的查询。这些工具还会在用户界面中标记这些错误,甚至在您运行之前。 - -## 如何解决问题 - -下面,您将发现现有GraphQL操作中可能发生的所有GraphQL验证错误。 - -### GraphQL变量、操作、片段或参数必须是唯一的 - -我们应用了规则来确保操作包含一组唯一的GraphQL变量、操作、片段和参数。 - -GraphQL操作只有在不包含任何歧义的情况下才有效。 - -为了实现这一点,我们需要确保GraphQL操作中的某些组件必须是唯一的。 - -以下是一些违反这些规则的无效操作的示例: - -**重复查询名称(#UniqueOperationNamesRule)** - -```graphql -#以下操作违反了UniqueOperationName -#规则,因为我们有一个带有2个查询的单个操作 -#具有相同的名称 -query myData { - id -} - -query myData { - name -} -``` - -_解决方案:_ - -```graphql -query myData { - id -} - -query myData2 { - # 重新给第二个查询命名 - name -} -``` - -**重复操作名称(#UniqueOperationNamesRule)** - -```graphql -#以下操作违反了UniqueOperationName -#规则。 -query myData { - id - ...MyFields -} - -fragment MyFields { - metadata -} - -fragment MyFields { - name -} -``` - -_解决方案:_ - -```graphql -query myData { - id - ...MyFieldsName - ...MyFieldsMetadata -} - -fragment MyFieldsMetadata { # assign a unique name to fragment - metadata -} - -fragment MyFieldsName { # assign a unique name to fragment - name -} -``` - -**重复变量名称(#UniqueOperationNamesRule)** - -```graphql -#以下操作违反了UniqueVariables -query myData($id: String, $id: Int) { - id - ...MyFields -} -``` - -_解决方案:_ - -```graphql -query myData($id: String) { - # 保持相关变量(这里是: `$id: String`) - id - ...MyFields -} -``` - -**重复参数名称(#UniqueOperationNamesRule)** - -```graphql -#以下操作违反了UniqueArguments -query myData($id: ID!) { - userById(id: $id, id: "1") { - id - } -} -``` - -_解决方案:_ - -```graphql -query myData($id: ID!) 
{ - userById(id: $id) { - id - } -} -``` - -**重复的匿名查询(#LoneAnonymousOperationRule)** - -此外,由于响应结构中的冲突,使用两个匿名操作将违反“LoneAnonymousOperation”规则: - -```graphql -#如果在中一起执行,则此操作将失败 -#具有以下两个查询的单个操作: -query { - someField -} - -query { - otherField -} -``` - -_解决方案:_ - -```graphql -query { - someField - otherField -} -``` - -或者命名两个查询: - -```graphql -query FirstQuery { - someField -} - -query SecondQuery { - otherField -} -``` - -### 重叠域 - -GraphQL选择集只有在正确解析最终结果集时才被视为有效。 - -如果特定的选择集或字段由于所选字段或使用的参数而产生歧义,GraphQL服务将无法验证该操作。 - -以下是一些违反这些规则的无效操作的示例: - -**字段别名冲突(#OverlappingFieldsCanBeMergedRule)** - -```graphql -#别名字段可能会导致冲突,或者 -#存在的其他别名或其他字段 -#在GraphQL模式上。 -query { - dogs { - name: nickname - name - } -} -``` - -_解决方案:_ - -```graphql -query { - dogs { - name: nickname - originalName: name # 是原名 `name` 的别名 - } -} -``` - -**参数字段冲突(#OverlappingFieldsCanBeMergedRule)** - -```graphql -#不同的参数可能导致不同的数据, -#所以我们不能假设字段是相同的。 -query { - dogs { - doesKnowCommand(dogCommand: SIT) - doesKnowCommand(dogCommand: HEEL) - } -} -``` - -_解决方案:_ - -```graphql -query { - dogs { - knowsHowToSit: doesKnowCommand(dogCommand: SIT) - knowsHowToHeel: doesKnowCommand(dogCommand: HEEL) - } -} -``` - -此外,在更复杂的用例中,您可能会使用两个片段来违反此规则,这两个片段可能会在最终预期的集合中引起冲突: - -```graphql -query { - # 最终,我们有两个"x"的定义,指向 - # 不同的域! 
- ...A - ...B -} - -fragment A on Type { - x: a -} - -fragment B on Type { - x: b -} -``` - -除此之外,客户端GraphQL指令(如“@skip”和“@include”)可能会导致歧义,例如: - -```graphql -fragment mergeSameFieldsWithSameDirectives on Dog { - name @include(if: true) - name @include(if: false) -} -``` - -[您可以在此处阅读有关算法的更多信息。](https://spec.graphql.org/June2018/#sec-字段选择合并) - -### 未使用的变量或片段 - -只有当使用了所有操作定义的组件(变量、片段)时,GraphQL操作才被认为是有效的。 - -以下是一些违反这些规则的无效操作的示例: - -**未使用的变量**(#NoUnusedVariablesRule) - -````graphql -#无效,因为从未使用过$someVar。 -查询某物($someVar: String){ -某些数据 -} - -``` -```` - -_解决方案:_ - -```graphql -query something { - someData -} -``` - -**未使用的片段**(#NoUnusedFragmentsRule) - -```graphql -#无效,因为片段AllFields从未使用过。 -询问某事{ - someData -} -fragment AllFields { # unused :( - name - age -} -``` - -_解决方案:_ - -```graphql -#无效,因为片段AllFields从未使用过。 -询问某事{ - someData -} - -#删除`AllFields'片段 -``` - -### 无效或缺少选择集(#ScalarLeafsRule) - -此外,GraphQL字段选择只有在以下内容得到验证时才有效: - -- 指定了对象字段的必备选择集。 -- 边缘字段(标量、枚举)不能指定选择集。 - -以下是下列模式违反这些规则的几个示例: - -```graphql -type Image { - url: String! -} - -type User { - id: ID! - avatar: Image! -} - -type Query { - user: User! -} -``` - -**无效的选择集** - -```graphql -query { - user { - id { # 无效, 因为"id"是ID类型且没有子域 - - } - } -} -``` - -_解决方案:_ - -```graphql -query { - user { - id - } -} -``` - -**缺少选择集** - -```graphql -query { - user { - id - image # `image`需要一个子域的选择集 - } -} -``` - -_解决方案:_ - -```graphql -query { - user { - id - image { - src - } - } -} -``` - -### 参数值不正确(#VariablesInAllowedPositionRule) - -根据模式中定义的值,将硬编码值传递给参数的GraphQL操作必须有效。 - -以下是一些违反这些规则的无效操作的示例: - -```graphql -query purposes { - # 如果“name”在模式中被定义为“String”, - #此查询将在验证过程中失败。 - purpose(name: 1) { - id - } -} - -#当定义了不正确的变量时,也可能发生这种情况: - -query purposes($name: Int!) 
{ - #如果“name”在模式中被定义为“String”, - #此查询将在验证期间失败,因为 - #使用的变量类型为`Int` - purpose(name: $name) { - id - } -} -``` - -### 未知类型、变量、片段或指令(#UnknownX) - -如果使用任何未知类型、变量、片段或指令,GraphQLAPI将引发错误。 - -必须修复这些未知引用: - -- 如果是打字错误,请重命名 -- 否则,请删除 - -### 片段:无效的排列或定义 - -**无效的片段排列(#PossibleFragmentSpreadsRule)** - -片段不能在不适用的类型上展开。 - -例如,我们不能将“Cat”片段应用于“Dog”类型: - -```graphql -query { - dog { - ...CatSimple - } -} - -fragment CatSimple on Cat { - # ... -} -``` - -**片段定义无效(#FragmentsOnCompositeTypesRule)** - -所有Fragment都必须在复合类型上定义(使用“on…”),简而言之:对象、接口或并集。 - -以下示例无效,因为在标量上定义fragment是无效的。 - -```graphql -fragment fragOnScalar on Int { - # 不能在标量`int`上定义`fragment` - something -} - -fragment inlineFragOnScalar on Dog { - ... on Boolean { - # `Boolean` 不是`Dog`的子类型 - somethingElse - } -} -``` - -### 指令使用 - -**指令不能在此位置使用(#KnownDirectivesRule)** - -只能使用Graph API支持的GraphQL指令(`@…`)。 - -以下是GraphQL支持的指令示例: - -```graphql -query { - dog { - name @include(true) - age @skip(true) - } -} -``` - -_注意:不支持“@stream”、“@live”和“@defer”_ - -**指令在此位置只能使用一次(#UniqueDirectivesPerLocationRule)** - -The Graph支持的指令在每个位置只能使用一次。 - -以下内容无效(并且是多余的): - -```graphql -query { - dog { - name @include(true) @include(true) - } -} -``` diff --git a/website/src/pages/zh/resources/subgraph-studio-faq.mdx b/website/src/pages/zh/resources/subgraph-studio-faq.mdx new file mode 100644 index 000000000000..8761f7a31bf6 --- /dev/null +++ b/website/src/pages/zh/resources/subgraph-studio-faq.mdx @@ -0,0 +1,31 @@ +--- +title: Subgraph Studio FAQs +--- + +## 1. What is Subgraph Studio? + +[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. + +## 2. How do I create an API Key? + +To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. + +## 3. Can I create multiple API Keys? + +Yes! You can create multiple API Keys to use in different projects. 
Check out the link [here](https://thegraph.com/studio/apikeys/). + +## 4. How do I restrict a domain for an API Key? + +After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. + +## 5. Can I transfer my subgraph to another owner? + +Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. + +Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. + +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? + +You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `<api-key>` placeholder with the API key you wish to leverage in Subgraph Studio. + +Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key are paid queries, like any other on the network.
diff --git a/website/src/pages/zh/subgraphs/_meta-titles.json b/website/src/pages/zh/subgraphs/_meta-titles.json index 15d4bb5577b5..0556abfc236c 100644 --- a/website/src/pages/zh/subgraphs/_meta-titles.json +++ b/website/src/pages/zh/subgraphs/_meta-titles.json @@ -1,5 +1,6 @@ { "querying": "Querying", "developing": "Developing", - "cookbook": "Cookbook" + "cookbook": "Cookbook", + "best-practices": "Best Practices" } diff --git a/website/src/pages/zh/subgraphs/_meta.js b/website/src/pages/zh/subgraphs/_meta.js index cdea2804a3da..3b490f214d14 100644 --- a/website/src/pages/zh/subgraphs/_meta.js +++ b/website/src/pages/zh/subgraphs/_meta.js @@ -7,4 +7,5 @@ export default { developing: titles.developing, billing: '', cookbook: titles.cookbook, + 'best-practices': titles['best-practices'], } diff --git a/website/src/pages/zh/subgraphs/best-practices/_meta.js b/website/src/pages/zh/subgraphs/best-practices/_meta.js new file mode 100644 index 000000000000..90464547a8f4 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/_meta.js @@ -0,0 +1,8 @@ +export default { + pruning: 'Pruning', + derivedfrom: 'Arrays with @derivedFrom', + 'immutable-entities-bytes-as-ids': 'Immutable Entities and Bytes as IDs', + 'avoid-eth-calls': 'Avoiding eth_calls', + timeseries: 'Timeseries & Aggregations', + 'grafting-hotfix': 'Grafting & Hotfixing', +} diff --git a/website/src/pages/zh/subgraphs/best-practices/avoid-eth-calls.mdx b/website/src/pages/zh/subgraphs/best-practices/avoid-eth-calls.mdx new file mode 100644 index 000000000000..4b24fafac947 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/avoid-eth-calls.mdx @@ -0,0 +1,117 @@ +--- +title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls +sidebarTitle: 'Subgraph Best Practice 4: Avoiding eth_calls' +--- + +## TLDR + +`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. 
If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. + +## Why Avoiding `eth_calls` Is a Best Practice + +Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. + +### What Does an eth_call Look Like? + +`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: + +```yaml +event Transfer(address indexed from, address indexed to, uint256 value); +``` + +Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. 
In this case, we would need to use an `eth_call` to query this data: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, Transfer } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransfer(event: Transfer): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + // Bind the ERC20 contract instance to the given address: + let instance = ERC20.bind(event.address) + + // Retrieve pool information via eth_call + let poolInfo = instance.getPoolInfo(event.params.to) + + transaction.pool = poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is functional, however is not ideal as it slows down our subgraph’s indexing. + +## How to Eliminate `eth_calls` + +Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: + +``` +event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); +``` + +With this update, the subgraph can directly index the required data without external calls: + +```typescript +import { Address } from '@graphprotocol/graph-ts' +import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' +import { TokenTransaction } from '../generated/schema' + +export function handleTransferWithPool(event: TransferWithPool): void { + let transaction = new TokenTransaction(event.transaction.hash.toHex()) + + transaction.pool = event.params.poolInfo.toHexString() + transaction.from = event.params.from.toHexString() + transaction.to = event.params.to.toHexString() + transaction.value = event.params.value + + transaction.save() +} +``` + +This is much more performant as it has eliminated the need for `eth_calls`. + +## How to Optimize `eth_calls` + +If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. + +## Reducing the Runtime Overhead of `eth_calls` + +For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. + +Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write + +```yaml +event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) +handler: handleTransferWithPool +calls: + ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) +``` + +The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.<name>`. + +The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. + +Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. + +## Conclusion + +You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/best-practices/derivedfrom.mdx b/website/src/pages/zh/subgraphs/best-practices/derivedfrom.mdx new file mode 100644 index 000000000000..344c906ffe55 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/derivedfrom.mdx @@ -0,0 +1,88 @@ +--- +title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom +sidebarTitle: 'Subgraph Best Practice 2: Arrays with @derivedFrom' +--- + +## TLDR + +Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. + +## How to Use the `@derivedFrom` Directive + +You just need to add a `@derivedFrom` directive after your array in your schema. Like this: + +```graphql +comments: [Comment!]! @derivedFrom(field: "post") +``` + +`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. + +### Example Use Case for `@derivedFrom` + +An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. + +Let’s start with our two entities, `Post` and `Comment` + +Without optimization, you could implement it like this with an array: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! +} + +type Comment @entity { + id: Bytes! + content: String! +} +``` + +Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
+ +Here’s what an optimized version looks like using `@derivedFrom`: + +```graphql +type Post @entity { + id: Bytes! + title: String! + content: String! + comments: [Comment!]! @derivedFrom(field: "post") +} + +type Comment @entity { + id: Bytes! + content: String! + post: Post! +} +``` + +Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. + +This will not only make our subgraph more efficient, but it will also unlock three features: + +1. We can query the `Post` and see all of its comments. +2. We can do a reverse lookup and query any `Comment` and see which post it comes from. + +3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. + +## Conclusion + +Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. + +For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/best-practices/grafting-hotfix.mdx b/website/src/pages/zh/subgraphs/best-practices/grafting-hotfix.mdx new file mode 100644 index 000000000000..ae41a5ce20ba --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/grafting-hotfix.mdx @@ -0,0 +1,187 @@ +--- +title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment +sidebarTitle: 'Subgraph Best Practice 6: Grafting and Hotfixing' +--- + +## TLDR + +Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. + +### Overview + +This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. + +## Benefits of Grafting for Hotfixes + +1. **Rapid Deployment** + + - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. + - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. + +2. **Data Preservation** + + - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. + - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. + +3. **Efficiency** + - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. + - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. 
+ +## Best Practices When Using Grafting for Hotfixes + +1. **Initial Deployment Without Grafting** + + - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. + - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. + +2. **Implementing the Hotfix with Grafting** + + - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. + - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. + - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. + - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. + +3. **Post-Hotfix Actions** + + - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. + - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. + > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. + - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. + +4. **Important Considerations** + - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. + - **Tip**: Use the block number of the last correctly processed event. + - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. + - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. + - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. + +## Example: Deploying a Hotfix with Grafting + +Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. 
Here’s how you can use grafting to deploy a hotfix. + +1. **Failed Subgraph Manifest (subgraph.yaml)** + + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: OldSmartContract + network: sepolia + source: + address: '0xOldContractAddress' + abi: Lock + startBlock: 5000000 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/OldLock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleOldWithdrawal + file: ./src/old-lock.ts + ``` + +2. **New Grafted Subgraph Manifest (subgraph.yaml)** + ```yaml + specVersion: 1.0.0 + schema: + file: ./schema.graphql + dataSources: + - kind: ethereum/contract + name: NewSmartContract + network: sepolia + source: + address: '0xNewContractAddress' + abi: Lock + startBlock: 6000001 # Block after the last indexed block + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Withdrawal + abis: + - name: Lock + file: ./abis/Lock.json + eventHandlers: + - event: Withdrawal(uint256,uint256) + handler: handleWithdrawal + file: ./src/lock.ts + features: + - grafting + graft: + base: QmBaseDeploymentID # Deployment ID of the failed subgraph + block: 6000000 # Last successfully indexed block + ``` + +**Explanation:** + +- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. +- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. +- **Grafting Configuration**: + - **base**: Deployment ID of the failed subgraph. + - **block**: Block number where grafting should begin. + +3. **Deployment Steps** + + - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). + - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
+ - **Deploy the Subgraph**: + - Authenticate with the Graph CLI. + - Deploy the new subgraph using `graph deploy`. + +4. **Post-Deployment** + - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. + - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. + - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. + +## Warnings and Cautions + +While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. + +- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. +- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. +- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. + +### Risk Management + +- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. +- **Testing**: Always test grafting in a development environment before deploying to production. 
+ +## Conclusion + +Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: + +- **Quickly Recover** from critical errors without re-indexing. +- **Preserve Historical Data**, maintaining continuity for applications and users. +- **Ensure Service Availability** by minimizing downtime during critical fixes. + +However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. + +## Additional Resources + +- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting +- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. + +By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx b/website/src/pages/zh/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx new file mode 100644 index 000000000000..067f26ffacf7 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/immutable-entities-bytes-as-ids.mdx @@ -0,0 +1,191 @@ +--- +title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs +sidebarTitle: 'Subgraph Best Practice 3: Immutable Entities and Bytes as IDs' +--- + +## TLDR + +Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. + +## Immutable Entities + +To make an entity immutable, we simply add `(immutable: true)` to an entity. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. + +Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging onchain event data, such as a `Transfer` event being logged as a `Transfer` entity. + +### Under the hood + +Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
+ +### When not to use Immutable Entities + +If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. + +## Bytes as IDs + +Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. + +```graphql +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} +``` + +While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. + +### Reasons to Not Use Bytes as IDs + +1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. +2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. +3. Indexing and querying performance improvements are not desired. + +### Concatenating With Bytes as IDs + +It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. + +Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
+ +```typescript +export function handleTransfer(event: TransferEvent): void { + let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) + entity.from = event.params.from + entity.to = event.params.to + entity.value = event.params.value + + entity.blockNumber = event.block.number + entity.blockTimestamp = event.block.timestamp + entity.transactionHash = event.transaction.hash + + entity.save() +} +``` + +### Sorting With Bytes as IDs + +Sorting using Bytes as IDs is not optimal as seen in this example query and response. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: id) { + id + from + to + value + } +} +``` + +Query response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x00010000", + "from": "0xabcd...", + "to": "0x1234...", + "value": "256" + }, + { + "id": "0x00020000", + "from": "0xefgh...", + "to": "0x5678...", + "value": "512" + }, + { + "id": "0x01000000", + "from": "0xijkl...", + "to": "0x9abc...", + "value": "1" + } + ] + } +} +``` + +The IDs are returned as hex. + +To improve sorting, we should create another field on the entity that is a BigInt. + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! # address + to: Bytes! # address + value: BigInt! # unit256 + tokenId: BigInt! # uint256 +} +``` + +This will allow for sorting to be optimized sequentially. + +Query: + +```graphql +{ + transfers(first: 3, orderBy: tokenId) { + id + tokenId + } +} +``` + +Query Response: + +```json +{ + "data": { + "transfers": [ + { + "id": "0x…", + "tokenId": "1" + }, + { + "id": "0x…", + "tokenId": "2" + }, + { + "id": "0x…", + "tokenId": "3" + } + ] + } +} +``` + +## Conclusion + +Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
+ +Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/best-practices/pruning.mdx b/website/src/pages/zh/subgraphs/best-practices/pruning.mdx new file mode 100644 index 000000000000..b620e504ab86 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/pruning.mdx @@ -0,0 +1,56 @@ +--- +title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning +sidebarTitle: 'Subgraph Best Practice 1: Pruning with indexerHints' +--- + +## TLDR + +[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. + +## How to Prune a Subgraph With `indexerHints` + +Add a section called `indexerHints` in the manifest. + +`indexerHints` has three `prune` options: + +- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. 
This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. +- `prune: <Number of blocks to retain>`: Sets a custom limit on the number of historical blocks to retain. +- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. + +We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: + +```yaml +specVersion: 1.0.0 +schema: + file: ./schema.graphql +indexerHints: + prune: auto +dataSources: + - kind: ethereum/contract + name: Contract + network: mainnet +``` + +## Important Considerations + +- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: <Number of blocks to retain>` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. + +- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: <Number of blocks to retain>` that will accurately retain a set number of blocks (e.g., enough for six months). + +## Conclusion + +Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/best-practices/timeseries.mdx b/website/src/pages/zh/subgraphs/best-practices/timeseries.mdx new file mode 100644 index 000000000000..2c721a9cef23 --- /dev/null +++ b/website/src/pages/zh/subgraphs/best-practices/timeseries.mdx @@ -0,0 +1,195 @@ +--- +title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations +sidebarTitle: 'Subgraph Best Practice 5: Timeseries and Aggregations' +--- + +## TLDR + +Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. + +## Overview + +Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. + +## Benefits of Timeseries and Aggregations + +1. Improved Indexing Time + +- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. +- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. + +2. Simplified Mapping Code + +- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. +- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. + +3. Dramatically Faster Queries + +- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
+- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. + +### Important Considerations + +- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. +- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. +- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. + +## How to Implement Timeseries and Aggregations + +### Defining Timeseries Entities + +A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: + +- Immutable: Timeseries entities are always immutable. +- Mandatory Fields: + - `id`: Must be of type `Int8!` and is auto-incremented. + - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. + +Example: + +```graphql +type Data @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + price: BigDecimal! +} +``` + +### Defining Aggregation Entities + +An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: + +- Annotation Arguments: + - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). + +Example: + +```graphql +type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { + id: Int8! + timestamp: Timestamp! + sum: BigDecimal! @aggregate(fn: "sum", arg: "price") +} +``` + +In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. + +### Querying Aggregated Data + +Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
+ +Example: + +```graphql +{ + tokenStats( + interval: "hour" + where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } + ) { + id + timestamp + token { + id + } + totalVolume + priceUSD + count + } +} +``` + +### Using Dimensions in Aggregations + +Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. + +Example: + +### Timeseries Entity + +```graphql +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + token: Token! + amount: BigDecimal! + priceUSD: BigDecimal! +} +``` + +### Aggregation Entity with Dimension + +```graphql +type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { + id: Int8! + timestamp: Timestamp! + token: Token! + totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") + priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") + count: Int8! @aggregate(fn: "count", cumulative: true) +} +``` + +- Dimension Field: token groups the data, so aggregates are computed per token. +- Aggregates: + - totalVolume: Sum of amount. + - priceUSD: Last recorded priceUSD. + - count: Cumulative count of records. + +### Aggregation Functions and Expressions + +Supported aggregation functions: + +- sum +- count +- min +- max +- first +- last + +### The arg in @aggregate can be + +- A field name from the timeseries entity. +- An expression using fields and constants. 
+ +### Examples of Aggregation Expressions + +- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \* amount") +- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") +- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") + +Supported operators and functions include basic arithmetic (+, -, \*, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. + +### Query Parameters + +- interval: Specifies the time interval (e.g., "hour"). +- where: Filters based on dimensions and timestamp ranges. +- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). + +### Notes + +- Sorting: Results are automatically sorted by timestamp and id in descending order. +- Current Data: An optional current argument can include the current, partially filled interval. + +### Conclusion + +Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: + +- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. +- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. +- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. + +By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. + +## Subgraph Best Practices 1-6 + +1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) + +2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) + +3.
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) + +4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) + +5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) + +6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/_meta.js b/website/src/pages/zh/subgraphs/cookbook/_meta.js index 66c172da5ef0..b9219a03a60a 100644 --- a/website/src/pages/zh/subgraphs/cookbook/_meta.js +++ b/website/src/pages/zh/subgraphs/cookbook/_meta.js @@ -6,12 +6,6 @@ export default { grafting: '', 'subgraph-uncrashable': '', 'transfer-to-the-graph': '', - pruning: '', - derivedfrom: '', - 'immutable-entities-bytes-as-ids': '', - 'avoid-eth-calls': '', - timeseries: '', - 'grafting-hotfix': '', enums: '', 'secure-api-keys-nextjs': '', polymarket: '', diff --git a/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx b/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index a0613bf2b69f..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. 
The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. - -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx b/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx deleted file mode 100644 index 22845a8d7dd2..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. - -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. 
- -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. We can use [Derived Field Loaders](/subgraphs/developing/creating/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. 
[Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx b/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx deleted file mode 100644 index 8153e7143816..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### 概述 - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. - -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. 
**Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. - > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. 
**Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. **New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. 
- - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. - -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. 
- -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. - -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## 其他资源 - -- **[Grafting Documentation](/subgraphs/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/subgraphs/querying/subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx b/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index ed3d902cfad3..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. 
- -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. - -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. 
- -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. 
- -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/pruning.mdx b/website/src/pages/zh/subgraphs/cookbook/pruning.mdx deleted file mode 100644 index c6b1217db9a5..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. 
-- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. `prune: never` should be selected if [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/subgraphs/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/subgraphs/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. 
[Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx b/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx deleted file mode 100644 index 7ac1de481272..000000000000 --- a/website/src/pages/zh/subgraphs/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## 概述 - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. -- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. 
- -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -例子: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -例子: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -例子: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -例子: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. - -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. 
- -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/subgraphs/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/subgraphs/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/subgraphs/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/subgraphs/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/subgraphs/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/subgraphs/cookbook/grafting-hotfix/) diff --git a/website/src/pages/zh/subgraphs/developing/deploying/_meta.js b/website/src/pages/zh/subgraphs/developing/deploying/_meta.js index c4faacb5e561..eafa80424610 100644 --- a/website/src/pages/zh/subgraphs/developing/deploying/_meta.js +++ b/website/src/pages/zh/subgraphs/developing/deploying/_meta.js @@ -1,5 +1,4 @@ export default { - 'using-subgraph-studio': '', - 'subgraph-studio-faq': '', - 'multiple-networks': '', + 'using-subgraph-studio': 'Deploying with Subgraph Studio', + 'multiple-networks': 'Deploying to Multiple Networks', } diff --git a/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx b/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx deleted file mode 100644 index 7365bb62a3d8..000000000000 --- a/website/src/pages/zh/subgraphs/developing/deploying/subgraph-studio-faq.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: 子图工作室常见问题 ---- - -## 1. 什么是 Subgraph Studio? - -[子图工作室](https://thegraph.com/studio/)是一个用于创建、管理和发布子图和 API 密钥的 dapp。 - -## 2. 如何创建 API 密钥? - -To create an API, navigate to Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. - -## 3. 我可以创建多个 API 密钥吗? - -是的! 您可以创建多个 API 密钥,以便在不同的项目中使用。查看[此处](https://thegraph.com/studio/apikeys/)的链接。 - -## 4. 如何为 API 密钥限制域? - -在创建 API 密钥之后,可以在安全部分中定义可以查询特定 API 密钥的域。 - -## 5. 我可以把我的子图转给其他所有者吗? - -Yes, subgraphs that have been published to Arbitrum One can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. - -请注意,一旦传输了子图,您将无法在工作室中查看或编辑该子图。 - -## 6. 如果我不是要使用的子图的开发人员,如何查找子图的查询URL? - -You can find the query URL of each subgraph in the Subgraph Details section of Graph Explorer. 
When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in Subgraph Studio. - -请记住,你可以创建一个 API 密钥并查询发布到网络上的任何子图,即使你自己建立了一个子图。 这些通过新的 API 密钥进行的查询,与网络上的任何其他查询一样,都是付费查询。 diff --git a/website/src/pages/zh/subgraphs/developing/publishing/_meta.js b/website/src/pages/zh/subgraphs/developing/publishing/_meta.js index 956339c6b49e..ba50fc36da59 100644 --- a/website/src/pages/zh/subgraphs/developing/publishing/_meta.js +++ b/website/src/pages/zh/subgraphs/developing/publishing/_meta.js @@ -1,3 +1,3 @@ export default { - 'publishing-a-subgraph': '', + 'publishing-a-subgraph': 'Publishing to the Decentralized Network', } diff --git a/website/src/pages/zh/subgraphs/querying/_meta.js b/website/src/pages/zh/subgraphs/querying/_meta.js index c933a65f7eb4..ca5ec51d18af 100644 --- a/website/src/pages/zh/subgraphs/querying/_meta.js +++ b/website/src/pages/zh/subgraphs/querying/_meta.js @@ -2,9 +2,9 @@ import titles from './_meta-titles.json' export default { introduction: '', - 'managing-api-keys': '', + 'managing-api-keys': 'Managing API Keys', 'best-practices': '', - 'from-an-application': '', + 'from-an-application': 'Querying From an App', 'distributed-systems': '', 'graphql-api': '', 'subgraph-id-vs-deployment-id': '',