diff --git a/.github/_typos.toml b/.github/_typos.toml
new file mode 100644
index 0000000..fc4986d
--- /dev/null
+++ b/.github/_typos.toml
@@ -0,0 +1,26 @@
+# Configuration for typos spell checker
+# See: https://github.com/crate-ci/typos
+
+[default]
+extend-ignore-re = [
+ # Line ignore: // typos:disable-line or # typos:disable-line
+ "(?Rm)^.*(#|//)\\s*typos:disable-line$",
+ # Block ignore: // typos:off ... // typos:on
+ "(?s)(#|//)\\s*typos:off.*?\\n\\s*(#|//)\\s*typos:on",
+ # Next-line ignore: // typos:ignore-next-line
+ "(#|//)\\s*typos:ignore-next-line\\n.*",
+]
+
+[default.extend-words]
+# "gauge" is correct for metrics (typos incorrectly suggests "gage")
+Gauge = "Gauge"
+gauge = "gauge"
+
+[files]
+extend-exclude = [
+ "*.pb.go",
+ "*.gen.go",
+ "**/testdata/**",
+ "*.svg",
+ "go.sum",
+]
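The three `extend-ignore-re` patterns act as inline escape hatches for lines that are misspelled on purpose (quoted log output, test fixtures, and so on), in either `#` or `//` comment style. A minimal sketch of how they would be used in a Go source file, assuming this config is active; the constants and strings below are invented purely for illustration:

```go
package fixtures

// Deliberately misspelled strings that the typos check should skip,
// using the markers matched by the extend-ignore-re patterns above.

// Line ignore: the trailing marker suppresses checking on this line only.
const rawLogLine = "recieved shutdown signal" // typos:disable-line

// Block ignore: everything between the off/on markers is skipped.
// typos:off
const garbledPayload = "teh quick brown fox"
// typos:on

// Next-line ignore: the marker suppresses checking on the line that follows.
// typos:ignore-next-line
const legacyField = "workflow_execusion_id"
```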
diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml
new file mode 100644
index 0000000..88c22f0
--- /dev/null
+++ b/.github/workflows/typos.yaml
@@ -0,0 +1,20 @@
+name: Spell Check
+
+on:
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ typos:
+ name: Check spelling
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Check spelling with typos
+ uses: crate-ci/typos@v1.28.4
+ with:
+ config: .github/_typos.toml
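The workflow only needs a checkout and the config file above; the same check can be reproduced locally before pushing, assuming the typos CLI is installed (for example via `cargo install typos-cli`), by running `typos --config .github/_typos.toml` from the repository root.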
diff --git a/cloud/observability/README.md b/cloud/observability/README.md
index da40f84..b3e80d8 100644
--- a/cloud/observability/README.md
+++ b/cloud/observability/README.md
@@ -95,7 +95,7 @@ sequenceDiagram
Gathering ->> Temporal Server: scrape
Gathering ->> Processing: push
end
    - Note right of Processing: Gathering is gobally distributed and pushed at various times
    + Note right of Processing: Gathering is globally distributed and pushed at various times
loop 30s interval
Processing ->> Processing: aggregate
        Note right of Processing: Aggregation is over a look back
diff --git a/cloud/observability/promql-to-dd-go/examples/datadog_dashboard.json b/cloud/observability/promql-to-dd-go/examples/datadog_dashboard.json
index c2b5ff3..a78a15e 100644
--- a/cloud/observability/promql-to-dd-go/examples/datadog_dashboard.json
+++ b/cloud/observability/promql-to-dd-go/examples/datadog_dashboard.json
@@ -420,7 +420,7 @@
{
"id": 6576468107016944,
"definition": {
- "title": "StartWorkflowExecutino Latency",
+ "title": "StartWorkflowExecution Latency",
"title_size": "16",
"title_align": "left",
"show_legend": true,
diff --git a/cloud/observability/promql-to-dd-go/worker/utils_test.go b/cloud/observability/promql-to-dd-go/worker/utils_test.go
index b75d7fa..18d00a8 100644
--- a/cloud/observability/promql-to-dd-go/worker/utils_test.go
+++ b/cloud/observability/promql-to-dd-go/worker/utils_test.go
@@ -57,7 +57,7 @@ func TestPromHistogramToDatadogGauge(t *testing.T) {
},
},
{
- name: "contains NaN vlaues",
+ name: "contains NaN values",
metricName: "latency",
quantile: 0.5,
matrix: model.Matrix{
diff --git a/cloud/observability/promql-to-dd-ts/src/index.ts b/cloud/observability/promql-to-dd-ts/src/index.ts
index cb7c83a..e2abf52 100644
--- a/cloud/observability/promql-to-dd-ts/src/index.ts
+++ b/cloud/observability/promql-to-dd-ts/src/index.ts
@@ -188,7 +188,7 @@ const queryPrometheusHistogram = async (
return queryResponseDataSchema.parse(response.data).data
}
-const convertPrometheusHistogramToDatadogGuageSeries = (
+const convertPrometheusHistogramToDatadogGaugeSeries = (
metricName: string,
quantile: number,
metricData: MetricData,
@@ -196,7 +196,7 @@ const convertPrometheusHistogramToDatadogGuageSeries = (
metricData.result.map(prometheusMetric => ({
// Make it easier for the datadog user to understand what this metric is
metric: DATADOG_METRIC_PREFIX + metricName.split('_bucket')[0] + '_P' + quantile * 100,
- // Type 2 is a "guage" metric
+ // Type 3 is a "gauge" metric
type: 3,
points: prometheusMetric.values.map(([timestamp, value]) => {
return {
@@ -246,9 +246,9 @@ const main = async () => {
),
)).flat()
- const guageSeries = (await Promise.all(histogramMetricNames.map(async metricName =>
+ const gaugeSeries = (await Promise.all(histogramMetricNames.map(async metricName =>
Promise.all(HISTOGRAM_QUANTILES.map(async quantile =>
- convertPrometheusHistogramToDatadogGuageSeries(
+ convertPrometheusHistogramToDatadogGaugeSeries(
metricName,
quantile,
await queryPrometheusHistogram(metricName, quantile, generateQueryWindow())
@@ -259,7 +259,7 @@ const main = async () => {
console.log({ level: 'info', message: 'Submitting metrics to Datadog' })
await datadogMetricsApi.submitMetrics({ body: { series: [
...countSeries,
- ...guageSeries,
+ ...gaugeSeries,
]}})
console.log({ level: 'info', message: 'Pausing for 20s' })
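As background for the corrected comment above ("Type 3 is a gauge metric"): the numeric `type` field follows the Datadog v2 series-intake convention (0 unspecified, 1 count, 2 rate, 3 gauge). A small illustrative sketch of that mapping in Go; the constant names are invented here, not taken from a Datadog client library:

```go
package main

import "fmt"

// metricIntakeType mirrors the numeric metric types accepted by the
// Datadog v2 series endpoint that the TypeScript code above submits to.
type metricIntakeType int

const (
	metricUnspecified metricIntakeType = 0
	metricCount       metricIntakeType = 1
	metricRate        metricIntakeType = 2
	metricGauge       metricIntakeType = 3 // hence "type: 3" for gauge series
)

func main() {
	fmt.Printf("gauge series are submitted with type %d\n", metricGauge)
}
```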
diff --git a/cloud/observability/promql-to-dd.py b/cloud/observability/promql-to-dd.py
index 94a286a..ad1fa9d 100755
--- a/cloud/observability/promql-to-dd.py
+++ b/cloud/observability/promql-to-dd.py
@@ -3,7 +3,7 @@
promql-to-dd.py - Import counters and histograms from prometheus api endpoint into datadog
While this demonstrates how to import prometheus api data using the datadog metrics API,
-there is a lot of room for improvement in terms of efficency and error handling.
+there is a lot of room for improvement in terms of efficiency and error handling.
To view this data in DataDog Metrics:
* use sum and as_rate for rate metrics
@@ -105,7 +105,7 @@ def retryable_submit_metrics(datadog_api: MetricsApi, body: MetricPayload):
def submit_datadog_series(datadog_api: MetricsApi, series: Iterable):
print(f"{datetime.now()}: Ingesting {len(series)} series into DataDog")
- # submit 200 series at a time as a naieve optimization
+ # submit 200 series at a time as a naive optimization
# this could be tuned to submit upto 5MB of metrics
# data compressed to a size of upto 512KB
non_empty_responses = []
diff --git a/tls/tls-simple/generate-test-certs.sh b/tls/tls-simple/generate-test-certs.sh
index 6ccd534..da88165 100755
--- a/tls/tls-simple/generate-test-certs.sh
+++ b/tls/tls-simple/generate-test-certs.sh
@@ -1,5 +1,5 @@
# This scripts generates test keys and certificates for the sample.
-# In a production environment such artifacts should be genrated
+# In a production environment such artifacts should be generated
# by a proper certificate authority and handled in a secure manner.
CERTS_DIR=./certs