@@ -3,7 +3,7 @@ title: GraphQL Schema Usage Insights
33---
44
55import NextImage from ' next/image'
6- import { Callout } from ' @theguild/components'
6+ import { Callout , Tabs } from ' @theguild/components'
77import usageClientsImage from ' ../../../public/docs/pages/features/usage-clients.png'
88import usageLatencyImage from ' ../../../public/docs/pages/features/usage-latency-over-time.png'
99import usageOperationsOverTimeImage from ' ../../../public/docs/pages/features/usage-operations-over-time.png'
@@ -18,10 +18,12 @@ following purposes:
1818
19191 . ** Monitoring and Observability** : view a list of all your GraphQL operations and their
2020 performance, error-rate, and other metrics.
21- 2 . ** Schema Usage and Coverage** : understand how your consumers are using your GraphQL schema, and
21+ 2 . ** Tracing** : view detailed traces of your GraphQL operations, with breakdown and timing of
22+ subgraph upstream requests.
23+ 3 . ** Schema Usage and Coverage** : understand how your consumers are using your GraphQL schema, and
2224 what parts of the schema are not being used at all (see
2325 [ Schema Usage and Coverage] ( /docs/schema-registry#schema-explorer ) ).
24- 3 . ** Schema Evolution** : with the knowledge of what GraphQL fields are being used, you can
26+ 4 . ** Schema Evolution** : with the knowledge of what GraphQL fields are being used, you can
2527 confidently evolve your schema without breaking your consumers (see
2628 [ Conditional Breaking Changes] ( /docs/management/targets#conditional-breaking-changes ) ).
2729
@@ -126,3 +128,276 @@ performance:
126128 src = { usageLatencyImage }
127129 className = " mt-10 max-w-2xl rounded-lg drop-shadow-md"
128130/>
131+
132+ ## Tracing
133+
134+ In addition to usage reports, you can obtain more details about the performance of your
135+ operations by reporting complete tracing data to Hive Tracing.
136+
137+ Hive Tracing shows the list of all recorded GraphQL operations executed by your customers, with all
138+ the associated spans. This can allow you to understand what takes time in an operation detected as
139+ slow in Hive Insights. It also allows you to see the actual sub-queries sent to subgraphs for a given
140+ GraphQL operation.
141+
142+ ### Setup in Hive Gateway
143+
144+ Hive Tracing is built on top of OpenTelemetry and is integrated out of the box in Hive Gateway.
145+
146+ To enable traces reporting, you can either use CLI options, environment variables, or a config file:
147+
148+ <Tabs items = { [" CLI options" , " Environment variables" , " Configuration file" ]} >
149+
150+ <Tabs.Tab >
151+
152+ ``` bash
153+ hive-gateway supergraph --hive-target=" xxx" --hive-trace-access-token=" xxx"
154+ ```
155+
156+ </Tabs.Tab >
157+
158+ <Tabs.Tab >
159+
160+ ``` bash
161+ HIVE_HIVE_TRACE_ACCESS_TOKEN=" xxx" HIVE_TARGET=" xxx" hive-gateway supergraph
162+ ```
163+
164+ </Tabs.Tab >
165+
166+ <Tabs.Tab >
167+
168+ ``` ts filename="gateway.config.ts"
169+ import { defineConfig } from ' @graphql-hive/gateway'
170+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
171+
172+ hiveTracingSetup ({ accessToken: ' YOUR_ACCESS_TOKEN' , target: ' YOUR_TARGET' })
173+
174+ export const gatewayConfig = defineConfig ({
175+ openTelemetry: {
176+ traces: true
177+ }
178+ })
179+ ```
180+
181+ </Tabs.Tab >
182+
183+ </Tabs >
184+
185+ ### Advanced Configuration
186+
187+ The integration has sane defaults for a production-ready setup, but you can also customize it to
188+ better suit your specific needs. Advanced configuration requires the use of a config file
189+ (` gateway.config.ts ` ).
190+
191+ It is highly recommended to place the telemetry setup in its own file, and import it as the very
192+ first import in ` gateway.config.ts ` . This is to ensure that any OTEL-compatible third-party
193+ libraries that you use are properly instrumented.
194+
195+ ``` ts filename="telemetry.ts"
196+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
197+
198+ hiveTracingSetup ({ accessToken: ' xxx' , target: ' xxx' })
199+ ```
200+
201+ ``` ts filename="gateway.config.ts"
202+ import ' ./telemetry.ts'
203+ import { defineConfig } from ' @graphql-hive/gateway'
204+
205+ export const gatewayConfig = defineConfig ({
206+ openTelemetry: {
207+ traces: true
208+ }
209+ })
210+ ```
211+
212+ #### Service Name and Version
213+
214+ You can provide a service name and version, either by using the standard ` OTEL_SERVICE_NAME ` and
215+ ` OTEL_SERVICE_VERSION ` environment variables, or by providing them programmatically via setup options:
216+
217+ ``` ts filename="telemetry.ts"
218+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
219+
220+ hiveTracingSetup ({
221+ resource: {
222+ serviceName: ' my-service' ,
223+ serviceVersion: ' 1.0.0'
224+ }
225+ })
226+ ```
227+
228+ #### Custom resource attributes
229+
230+ Resource attributes can be defined by providing a ` Resource ` instance to the setup ` resource `
231+ option.
232+
233+ This resource will be merged with the resource created from env variables, which means
234+ ` service.name ` and ` service.version ` are not mandatory if already provided through environment
235+ variables.
236+
237+ ``` sh npm2yarn
238+ npm i @opentelemetry/resources # Not needed with Docker image
239+ ```
240+
241+ ``` ts filename="telemetry.ts"
242+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
243+ import { resourceFromAttributes } from ' @opentelemetry/resources'
244+
245+ hiveTracingSetup ({
246+ resource: resourceFromAttributes ({
247+ ' custom.attribute' : ' my custom value'
248+ })
249+ })
250+ ```
251+
252+ #### Span Batching
253+
254+ By default, if you provide only a Trace Exporter, it will be wrapped into a ` BatchSpanProcessor ` to
255+ batch spans together and reduce the number of requests to your backend.
256+
257+ This is an important feature for a real-world production environment, and you can configure its
258+ behavior to exactly suit your infrastructure limits.
259+
260+ By default, the batch processor will send the spans every 5 seconds or when the buffer is full.
261+
262+ The following configuration values are allowed:
263+
264+ - ` true ` (default): enables batching and uses
265+ [ ` BatchSpanProcessor ` ] ( https://opentelemetry.io/docs/specs/otel/trace/sdk/#batching-processor )
266+ default config.
267+ - ` object ` : enables batching and uses
268+ [ ` BatchSpanProcessor ` ] ( https://opentelemetry.io/docs/specs/otel/trace/sdk/#batching-processor )
269+ with the provided configuration.
270+ - ` false ` : disables batching and uses
271+ [ ` SimpleSpanProcessor ` ] ( https://opentelemetry.io/docs/specs/otel/trace/sdk/#simple-processor )
272+
273+ ``` ts filename="telemetry.ts"
274+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
275+
276+ hiveTracingSetup ({
277+ batching: {
278+ exportTimeoutMillis: 30_000 , // Default to 30_000ms
279+ maxExportBatchSize: 512 , // Default to 512 spans
280+ maxQueueSize: 2048 , // Default to 2048 spans
281+ scheduledDelayMillis: 5_000 // Default to 5_000ms
282+ }
283+ })
284+ ```
285+
286+ #### Sampling
287+
288+ When your gateway has a lot of traffic, tracing every request can become a very expensive
289+ approach.
290+
291+ A mitigation for this problem is to trace only some requests, using a strategy to choose which
292+ requests to trace.
293+
294+ The most common strategy is to combine a parent-first strategy (a span is picked if its parent is
295+ picked) with a ratio based on trace ID (each trace, one per request, has a chance to be picked,
296+ with a given rate).
297+
298+ By default, all requests are traced. You can either provide your own Sampler, or provide a
299+ sampling rate which will be used to set up a Parent + TraceID Ratio strategy.
300+
301+ ``` ts filename="telemetry.ts"
302+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
303+ import { JaegerRemoteSampler } from ' @opentelemetry/sampler-jaeger-remote'
304+ import { AlwaysOnSampler } from ' @opentelemetry/sdk-trace-base'
305+
306+ hiveTracingSetup ({
307+ // Use Parent + TraceID Ratio strategy
308+ samplingRate: 0.1 ,
309+
310+ // Or use a custom Sampler
311+ sampler: new JaegerRemoteSampler ({
312+ endpoint: ' http://your-jaeger-agent:14268/api/sampling' ,
313+ serviceName: ' your-service-name' ,
314+ initialSampler: new AlwaysOnSampler (),
315+ poolingInterval: 60000 // 60 seconds
316+ })
317+ })
318+ ```
319+
320+ #### Limits
321+
322+ To ensure that you don't overwhelm your tracing ingestion infrastructure, you can set limits for
323+ both the cardinality and the amount of data the OpenTelemetry SDK will be allowed to generate.
324+
325+ ``` ts filename="telemetry.ts"
326+ import { hiveTracingSetup } from ' @graphql-hive/gateway/opentelemetry/setup'
327+
328+ hiveTracingSetup ({
329+ generalLimits: {
330+ // ...
331+ },
332+ spanLimits: {
333+ // ...
334+ }
335+ })
336+ ```
337+
338+ #### Spans, Events and Attributes
339+
340+ For more details about Spans, Events and Attributes configuration, please refer to
341+ [ ` Monitoring and Tracing documentation ` ] ( /docs/gateway/monitoring-tracing#configuration ) .
342+
343+ ### Manual OpenTelemetry Setup
344+
345+ If you have an existing OpenTelemetry setup and want to send your traces to both Hive Tracing and
346+ your own OTel backend, you can use ` HiveTracingSpanProcessor ` .
347+
348+ For more information about setting up OpenTelemetry manually, please refer to
349+ [ ` Monitoring and Tracing documentation ` ] ( /docs/gateway/monitoring-tracing#service-name-and-version )
350+
351+ <Tabs items = { [<div >Hive Gateway <code >openTelemetrySetup()</code > (recommended)</div >, <div >OpenTelemetry <code >NodeSDK</code ></div >]} >
352+
353+ <Tabs.Tab >
354+
355+ ``` ts filename="telemetry.ts"
356+ import {
357+ HiveTracingSpanProcessor ,
358+ openTelemetrySetup
359+ } from ' @graphql-hive/gateway/opentelemetry/setup'
360+ import { AsyncLocalStorageContextManager } from ' @opentelemetry/context-async-hooks'
361+
362+ openTelemetrySetup ({
363+ contextManager: new AsyncLocalStorageContextManager (),
364+ traces: {
365+ // Define your span processors.
366+ processors: [
367+ new HiveTracingSpanProcessor ({
368+ endpoint: ' https://api.graphql-hive.com/otel/v1/traces' ,
369+ target: process .env [' HIVE_TARGET' ],
370+ accessToken: process .env [' HIVE_TRACES_ACCESS_TOKEN' ]
371+ })
372+
373+ // ... your processors
374+ ]
375+ }
376+ })
377+ ```
378+
379+ </Tabs.Tab >
380+
381+ <Tabs.Tab >
382+
383+ ``` ts filename="telemetry.ts"
384+ import { HiveTracingSpanProcessor } from ' @graphql-hive/gateway/opentelemetry/setup'
385+ import { NodeSDK } from ' @opentelemetry/sdk-node'
386+
387+ new NodeSDK ({
388+ // Define your processors
389+ spanProcessors: [
390+ new HiveTracingSpanProcessor ({
391+ endpoint: ' https://api.graphql-hive.com/otel/v1/traces' ,
392+ target: process .env [' HIVE_TARGET' ],
393+ accessToken: process .env [' HIVE_TRACES_ACCESS_TOKEN' ]
394+ })
395+
396+ // ... your processors
397+ ]
398+ }).start ()
399+ ```
400+
401+ </Tabs.Tab >
402+
403+ </Tabs >
0 commit comments