diff --git a/.gitignore b/.gitignore
index 96012efb78..ca12a40d2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,4 +62,14 @@ tools/**
Resource.designer.cs
# Tests
-TestResults
\ No newline at end of file
+TestResults
+
+## Mac OS
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Thumbnails
+._*
diff --git a/BenchmarkDotNet.sln.DotSettings b/BenchmarkDotNet.sln.DotSettings
index 53f5cf0a60..7c0f8a2b02 100644
--- a/BenchmarkDotNet.sln.DotSettings
+++ b/BenchmarkDotNet.sln.DotSettings
@@ -161,6 +161,7 @@
True
True
True
+ True
True
True
True
@@ -185,6 +186,7 @@
True
True
True
+ True
True
True
True
diff --git a/src/BenchmarkDotNet/Attributes/Exporters/OpenMetricsExporterAttribute.cs b/src/BenchmarkDotNet/Attributes/Exporters/OpenMetricsExporterAttribute.cs
new file mode 100644
index 0000000000..f23bd5cf19
--- /dev/null
+++ b/src/BenchmarkDotNet/Attributes/Exporters/OpenMetricsExporterAttribute.cs
@@ -0,0 +1,14 @@
+using BenchmarkDotNet.Exporters;
+using BenchmarkDotNet.Exporters.OpenMetrics;
+using JetBrains.Annotations;
+
+namespace BenchmarkDotNet.Attributes
+{
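+ /// <summary>
+ /// Enables <see cref="OpenMetricsExporter"/> for the annotated benchmark class.
+ /// A minimal usage sketch (type and benchmark names are illustrative only):
+ /// <code>
+ /// [OpenMetricsExporter]
+ /// public class MyBenchmarks
+ /// {
+ ///     [Benchmark]
+ ///     public void Foo() { }
+ /// }
+ /// </code>
+ /// </summary>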
+ [PublicAPI]
+ public class OpenMetricsExporterAttribute : ExporterConfigBaseAttribute
+ {
+ public OpenMetricsExporterAttribute() : base(OpenMetricsExporter.Default)
+ {
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/BenchmarkDotNet/Exporters/DefaultExporters.cs b/src/BenchmarkDotNet/Exporters/DefaultExporters.cs
index 50fbb8714c..dc4acada77 100644
--- a/src/BenchmarkDotNet/Exporters/DefaultExporters.cs
+++ b/src/BenchmarkDotNet/Exporters/DefaultExporters.cs
@@ -1,5 +1,6 @@
using BenchmarkDotNet.Exporters.Csv;
using BenchmarkDotNet.Exporters.Json;
+using BenchmarkDotNet.Exporters.OpenMetrics;
using BenchmarkDotNet.Exporters.Xml;
using JetBrains.Annotations;
@@ -12,6 +13,7 @@ public static class DefaultExporters
[PublicAPI] public static readonly IExporter CsvMeasurements = CsvMeasurementsExporter.Default;
[PublicAPI] public static readonly IExporter Html = HtmlExporter.Default;
[PublicAPI] public static readonly IExporter Markdown = MarkdownExporter.Default;
+ [PublicAPI] public static readonly IExporter OpenMetrics = OpenMetricsExporter.Default;
[PublicAPI] public static readonly IExporter Plain = PlainExporter.Default;
[PublicAPI] public static readonly IExporter RPlot = RPlotExporter.Default;
diff --git a/src/BenchmarkDotNet/Exporters/OpenMetrics/OpenMetricsExporter.cs b/src/BenchmarkDotNet/Exporters/OpenMetrics/OpenMetricsExporter.cs
new file mode 100644
index 0000000000..34b861bb43
--- /dev/null
+++ b/src/BenchmarkDotNet/Exporters/OpenMetrics/OpenMetricsExporter.cs
@@ -0,0 +1,322 @@
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Linq;
+using BenchmarkDotNet.Loggers;
+using BenchmarkDotNet.Parameters;
+using BenchmarkDotNet.Reports;
+using BenchmarkDotNet.Running;
+using System;
+using System.Text;
+using BenchmarkDotNet.Engines;
+using BenchmarkDotNet.Extensions;
+using BenchmarkDotNet.Mathematics;
+
+namespace BenchmarkDotNet.Exporters.OpenMetrics;
+
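+/// <summary>
+/// Exports summary results in the OpenMetrics / Prometheus text exposition format:
+/// a "# HELP", "# TYPE" and optional "# UNIT" line per metric family, followed by one
+/// sample per benchmark case with its labels, terminated by a final "# EOF" marker.
+/// </summary>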
+public class OpenMetricsExporter : ExporterBase
+{
+ private const string MetricPrefix = "benchmark_";
+ protected override string FileExtension => "metrics";
+ protected override string FileCaption => "openmetrics";
+
+ public static readonly IExporter Default = new OpenMetricsExporter();
+
+ public override void ExportToLog(Summary summary, ILogger logger)
+ {
+ var metricsSet = new HashSet<OpenMetric>();
+
+ foreach (var report in summary.Reports)
+ {
+ var benchmark = report.BenchmarkCase;
+ var gcStats = report.GcStats;
+ var descriptor = benchmark.Descriptor;
+ var parameters = benchmark.Parameters;
+
+ var stats = report.ResultStatistics;
+ var metrics = report.Metrics;
+ if (stats == null)
+ continue;
+
+ AddCommonMetrics(metricsSet, descriptor, parameters, stats, gcStats);
+ AddAdditionalMetrics(metricsSet, metrics, descriptor, parameters);
+ }
+
+ WriteMetricsToLogger(logger, metricsSet);
+ }
+
+ private static void AddCommonMetrics(HashSet<OpenMetric> metricsSet, Descriptor descriptor, ParameterInstances parameters, Statistics stats, GcStats gcStats)
+ {
+ metricsSet.AddRange([
+ // Mean
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}execution_time_nanoseconds",
+ "Mean execution time in nanoseconds.",
+ "gauge",
+ "nanoseconds",
+ descriptor,
+ parameters,
+ stats.Mean),
+ // Error
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}error_nanoseconds",
+ "Standard error of the mean execution time in nanoseconds.",
+ "gauge",
+ "nanoseconds",
+ descriptor,
+ parameters,
+ stats.StandardError),
+ // Standard Deviation
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}stddev_nanoseconds",
+ "Standard deviation of execution time in nanoseconds.",
+ "gauge",
+ "nanoseconds",
+ descriptor,
+ parameters,
+ stats.StandardDeviation),
+ // GC Stats Gen0 - these are counters, not gauges
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}gc_gen0_collections_total",
+ "Total number of Gen 0 garbage collections during the benchmark execution.",
+ "counter",
+ "",
+ descriptor,
+ parameters,
+ gcStats.Gen0Collections),
+ // GC Stats Gen1
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}gc_gen1_collections_total",
+ "Total number of Gen 1 garbage collections during the benchmark execution.",
+ "counter",
+ "",
+ descriptor,
+ parameters,
+ gcStats.Gen1Collections),
+ // GC Stats Gen2
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}gc_gen2_collections_total",
+ "Total number of Gen 2 garbage collections during the benchmark execution.",
+ "counter",
+ "",
+ descriptor,
+ parameters,
+ gcStats.Gen2Collections),
+ // Total GC Operations
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}gc_total_operations_total",
+ "Total number of garbage collection operations during the benchmark execution.",
+ "counter",
+ "",
+ descriptor,
+ parameters,
+ gcStats.TotalOperations),
+ // P90 - in nanoseconds
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}p90_nanoseconds",
+ "90th percentile execution time in nanoseconds.",
+ "gauge",
+ "nanoseconds",
+ descriptor,
+ parameters,
+ stats.Percentiles.P90),
+ // P95 - in nanoseconds
+ OpenMetric.FromStatistics(
+ $"{MetricPrefix}p95_nanoseconds",
+ "95th percentile execution time in nanoseconds.",
+ "gauge",
+ "nanoseconds",
+ descriptor,
+ parameters,
+ stats.Percentiles.P95)
+ ]);
+ }
+
+ private static void AddAdditionalMetrics(HashSet<OpenMetric> metricsSet, IReadOnlyDictionary<string, Metric> metrics, Descriptor descriptor, ParameterInstances parameters)
+ {
+ var reservedMetricNames = new HashSet<string>
+ {
+ $"{MetricPrefix}execution_time_nanoseconds",
+ $"{MetricPrefix}error_nanoseconds",
+ $"{MetricPrefix}stddev_nanoseconds",
+ $"{MetricPrefix}gc_gen0_collections_total",
+ $"{MetricPrefix}gc_gen1_collections_total",
+ $"{MetricPrefix}gc_gen2_collections_total",
+ $"{MetricPrefix}gc_total_operations_total",
+ $"{MetricPrefix}p90_nanoseconds",
+ $"{MetricPrefix}p95_nanoseconds"
+ };
+
+ foreach (var metric in metrics)
+ {
+ string metricName = SanitizeMetricName(metric.Key);
+ string fullMetricName = $"{MetricPrefix}{metricName}";
+
+ if (reservedMetricNames.Contains(fullMetricName))
+ continue;
+
+ metricsSet.Add(OpenMetric.FromMetric(
+ fullMetricName,
+ metric,
+ "gauge", // Assuming all additional metrics are of type "gauge"
+ descriptor,
+ parameters));
+ }
+ }
+
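+ // Emits one HELP/TYPE(/UNIT) block per metric family, then every sample of that family,
+ // e.g. (shape taken from the verified exporter output):
+ //   # HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+ //   # TYPE benchmark_execution_time_nanoseconds gauge
+ //   # UNIT benchmark_execution_time_nanoseconds nanoseconds
+ //   benchmark_execution_time_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1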
+ private static void WriteMetricsToLogger(ILogger logger, HashSet<OpenMetric> metricsSet)
+ {
+ var emittedHelpType = new HashSet<string>();
+
+ foreach (var metric in metricsSet.OrderBy(m => m.Name))
+ {
+ if (!emittedHelpType.Contains(metric.Name))
+ {
+ logger.WriteLine($"# HELP {metric.Name} {metric.Help}");
+ logger.WriteLine($"# TYPE {metric.Name} {metric.Type}");
+ if (!string.IsNullOrEmpty(metric.Unit))
+ {
+ logger.WriteLine($"# UNIT {metric.Name} {metric.Unit}");
+ }
+ emittedHelpType.Add(metric.Name);
+ }
+
+ logger.WriteLine(metric.ToString());
+ }
+
+ logger.WriteLine("# EOF");
+ }
+
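+ // Maps an arbitrary metric descriptor name onto the OpenMetrics name charset used here
+ // ([a-z0-9_]): lower-cases, collapses runs of other characters into a single '_',
+ // trims leading/trailing underscores and prefixes '_' if the result would start with a digit.
+ // For example "CacheMisses" becomes "cachemisses" and "label.with.dot" becomes "label_with_dot".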
+ private static string SanitizeMetricName(string name)
+ {
+ var builder = new StringBuilder();
+ bool lastWasUnderscore = false;
+
+ foreach (char c in name.ToLowerInvariant())
+ {
+ if (char.IsLetterOrDigit(c) || c == '_')
+ {
+ builder.Append(c);
+ lastWasUnderscore = false;
+ }
+ else if (!lastWasUnderscore)
+ {
+ builder.Append('_');
+ lastWasUnderscore = true;
+ }
+ }
+
+ string result = builder.ToString().Trim('_');
+
+ if (result.Length > 0 && char.IsDigit(result[0]))
+ result = "_" + result;
+
+ return result;
+ }
+
+ private class OpenMetric : IEquatable<OpenMetric>
+ {
+ internal string Name { get; }
+ internal string Help { get; }
+ internal string Type { get; }
+ internal string Unit { get; }
+ private readonly ImmutableSortedDictionary<string, string> labels;
+ private readonly double value;
+
+ private OpenMetric(string name, string help, string type, string unit, ImmutableSortedDictionary<string, string> labels, double value)
+ {
+ if (string.IsNullOrWhiteSpace(name)) throw new ArgumentException("Metric name cannot be null or empty.");
+ if (string.IsNullOrWhiteSpace(type)) throw new ArgumentException("Metric type cannot be null or empty.");
+
+ Name = name;
+ Help = help;
+ Type = type;
+ Unit = unit ?? "";
+ this.labels = labels ?? throw new ArgumentNullException(nameof(labels));
+ this.value = value;
+ }
+
+ public static OpenMetric FromStatistics(string name, string help, string type, string unit, Descriptor descriptor, ParameterInstances parameters, double value)
+ {
+ var labels = BuildLabelDict(descriptor, parameters);
+ return new OpenMetric(name, help, type, unit, labels, value);
+ }
+
+ public static OpenMetric FromMetric(string fullMetricName, KeyValuePair<string, Metric> metric, string type, Descriptor descriptor, ParameterInstances parameters)
+ {
+ string help = $"Additional metric {metric.Key}";
+ var labels = BuildLabelDict(descriptor, parameters);
+ return new OpenMetric(fullMetricName, help, type, "", labels, metric.Value.Value);
+ }
+
+ private static readonly Dictionary<string, string> NormalizedLabelKeyCache = new();
+ private static string NormalizeLabelKey(string key)
+ {
+ if (NormalizedLabelKeyCache.TryGetValue(key, out string? cached))
+ return cached;
+
+ string normalized = new(key
+ .ToLowerInvariant()
+ .Select(c => char.IsLetterOrDigit(c) ? c : '_')
+ .ToArray());
+ NormalizedLabelKeyCache[key] = normalized;
+ return normalized;
+ }
+
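+ // Builds the label set attached to every sample: the workload method name, the declaring
+ // type, plus one normalized label per benchmark parameter (e.g. param1="value1").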
+ private static ImmutableSortedDictionary<string, string> BuildLabelDict(Descriptor descriptor, ParameterInstances parameters)
+ {
+ var dict = new SortedDictionary<string, string>
+ {
+ ["method"] = descriptor.WorkloadMethod.Name,
+ ["type"] = descriptor.TypeInfo
+ };
+ foreach (var param in parameters.Items)
+ {
+ string key = NormalizeLabelKey(param.Name);
+ string value = EscapeLabelValue(param.Value?.ToString() ?? "");
+ dict[key] = value;
+ }
+ return dict.ToImmutableSortedDictionary();
+ }
+
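+ // Escapes label values for the OpenMetrics text format: backslashes, double quotes and
+ // line breaks in a parameter value are backslash-escaped so the sample line stays parseable
+ // (e.g. a value of say "hi" is written as say \"hi\").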
+ private static string EscapeLabelValue(string value)
+ {
+ return value.Replace("\\", @"\\")
+ .Replace("\"", "\\\"")
+ .Replace("\n", "\\n")
+ .Replace("\r", "\\r")
+ .Replace("\t", "\\t");
+ }
+
+ public override bool Equals(object? obj) => Equals(obj as OpenMetric);
+
+ public bool Equals(OpenMetric? other)
+ {
+ if (other is null)
+ return false;
+
+ return Name == other.Name
+ && value.Equals(other.value)
+ && labels.Count == other.labels.Count
+ && labels.All(kv => other.labels.TryGetValue(kv.Key, out string? otherValue) && kv.Value == otherValue);
+ }
+
+ public override int GetHashCode()
+ {
+ var hash = new HashCode();
+ hash.Add(Name);
+ hash.Add(value);
+
+ foreach (var kv in labels)
+ {
+ hash.Add(kv.Key);
+ hash.Add(kv.Value);
+ }
+
+ return hash.ToHashCode();
+ }
+
+ public override string ToString()
+ {
+ string labelStr = labels.Count > 0
+ ? $"{{{string.Join(", ", labels.Select(kvp => $"{kvp.Key}=\"{kvp.Value}\""))}}}"
+ : string.Empty;
+ return $"{Name}{labelStr} {value}";
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/BenchmarkDotNet.IntegrationTests/ValidatorsTest.cs b/tests/BenchmarkDotNet.IntegrationTests/ValidatorsTest.cs
index d6de979310..061d594e3a 100644
--- a/tests/BenchmarkDotNet.IntegrationTests/ValidatorsTest.cs
+++ b/tests/BenchmarkDotNet.IntegrationTests/ValidatorsTest.cs
@@ -8,6 +8,7 @@
using Xunit;
using Xunit.Abstractions;
using BenchmarkDotNet.Exporters.Json;
+using BenchmarkDotNet.Exporters.OpenMetrics;
using BenchmarkDotNet.Exporters.Xml;
namespace BenchmarkDotNet.IntegrationTests
@@ -23,6 +24,7 @@ public ValidatorsTest(ITestOutputHelper output) : base(output) { }
MarkdownExporter.Default,
MarkdownExporter.GitHub,
MarkdownExporter.StackOverflow,
+ OpenMetricsExporter.Default,
CsvExporter.Default,
CsvMeasurementsExporter.Default,
HtmlExporter.Default,
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/CommonExporterVerifyTests.cs b/tests/BenchmarkDotNet.Tests/Exporters/CommonExporterVerifyTests.cs
index 7799fd2038..9cd91d4ec2 100644
--- a/tests/BenchmarkDotNet.Tests/Exporters/CommonExporterVerifyTests.cs
+++ b/tests/BenchmarkDotNet.Tests/Exporters/CommonExporterVerifyTests.cs
@@ -8,10 +8,10 @@
using BenchmarkDotNet.Diagnosers;
using BenchmarkDotNet.Exporters;
using BenchmarkDotNet.Exporters.Json;
+using BenchmarkDotNet.Exporters.OpenMetrics;
using BenchmarkDotNet.Exporters.Xml;
using BenchmarkDotNet.Loggers;
using BenchmarkDotNet.Reports;
-using BenchmarkDotNet.Tests.Builders;
using BenchmarkDotNet.Tests.Infra;
using BenchmarkDotNet.Tests.Mocks;
using BenchmarkDotNet.Tests.Reports;
@@ -95,6 +95,7 @@ private static IEnumerable GetExporters()
yield return MarkdownExporter.Console;
yield return MarkdownExporter.GitHub;
yield return MarkdownExporter.StackOverflow;
+ yield return OpenMetricsExporter.Default;
yield return PlainExporter.Default;
yield return XmlExporter.Brief;
yield return XmlExporter.BriefCompressed;
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/MarkdownExporterVerifyTests.cs b/tests/BenchmarkDotNet.Tests/Exporters/MarkdownExporterVerifyTests.cs
index 2d57e308a0..ab6fb25199 100644
--- a/tests/BenchmarkDotNet.Tests/Exporters/MarkdownExporterVerifyTests.cs
+++ b/tests/BenchmarkDotNet.Tests/Exporters/MarkdownExporterVerifyTests.cs
@@ -9,7 +9,6 @@
using BenchmarkDotNet.Tests.Mocks;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Configs;
-using BenchmarkDotNet.Tests.Builders;
using BenchmarkDotNet.Tests.Infra;
using BenchmarkDotNet.Validators;
using JetBrains.Annotations;
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/OpenMetricsExporterTests.cs b/tests/BenchmarkDotNet.Tests/Exporters/OpenMetricsExporterTests.cs
new file mode 100644
index 0000000000..25cfd8fb1d
--- /dev/null
+++ b/tests/BenchmarkDotNet.Tests/Exporters/OpenMetricsExporterTests.cs
@@ -0,0 +1,244 @@
+using System.Threading.Tasks;
+using System;
+using System.Collections.Generic;
+using System.Collections.Immutable;
+using System.Globalization;
+using BenchmarkDotNet.Columns;
+using BenchmarkDotNet.Configs;
+using BenchmarkDotNet.Engines;
+using BenchmarkDotNet.Environments;
+using BenchmarkDotNet.Exporters.OpenMetrics;
+using BenchmarkDotNet.Jobs;
+using BenchmarkDotNet.Loggers;
+using BenchmarkDotNet.Parameters;
+using BenchmarkDotNet.Reports;
+using BenchmarkDotNet.Running;
+using BenchmarkDotNet.Tests.Infra;
+using BenchmarkDotNet.Tests.Mocks;
+using BenchmarkDotNet.Tests.Reports;
+using BenchmarkDotNet.Toolchains.Results;
+using BenchmarkDotNet.Validators;
+using VerifyXunit;
+using Xunit;
+
+namespace BenchmarkDotNet.Tests.Exporters
+{
+ [Collection("VerifyTests")]
+ public class OpenMetricsExporterTests
+ {
+ [Fact]
+ public Task SingleBenchmark_ProducesHelpAndTypeOnce()
+ {
+ var summary = new Summary(
+ "SingleBenchmarkSummary",
+ [
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value1", SummaryStyle.Default),
+ ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ }),
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value2", SummaryStyle.Default),
+ ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ }),
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value3", SummaryStyle.Default),
+ ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ })
+ ],
+ HostEnvironmentInfo.GetCurrent(),
+ "",
+ "",
+ TimeSpan.Zero,
+ CultureInfo.InvariantCulture,
+ ImmutableArray<ValidationError>.Empty,
+ ImmutableArray<IColumnHidingRule>.Empty);
+
+ var logger = new AccumulationLogger();
+
+ OpenMetricsExporter.Default.ExportToLog(summary, logger);
+
+ var settings = VerifyHelper.Create();
+ return Verifier.Verify(logger.GetLog(), settings);
+ }
+
+ [Fact]
+ public Task ParametrizedBenchmarks_LabelExpansion()
+ {
+ var summary = new Summary(
+ "SingleBenchmarkSummary",
+ [
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value1", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param2", false, ["Parameter 2"], true, typeof(string), 0 ), "value1", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param3", false, ["Parameter 3"], true, typeof(string), 0 ), "value1", SummaryStyle.Default)
+ ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ }),
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value2", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param2", false, ["Parameter 2"], true, typeof(string), 0 ), "value2", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param3", false, ["Parameter 3"], true, typeof(string), 0 ), "value2", SummaryStyle.Default) ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ }),
+ new BenchmarkReport(
+ success: true,
+ benchmarkCase: new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances([
+ new ParameterInstance(new ParameterDefinition("param1", false, ["Parameter 1"], true, typeof(string), 0 ), "value3", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param2", false, ["Parameter 2"], true, typeof(string), 0 ), "value3", SummaryStyle.Default),
+ new ParameterInstance(new ParameterDefinition("param3", false, ["Parameter 3"], true, typeof(string), 0 ), "value3", SummaryStyle.Default) ]),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label", "label"), 42.0)
+ })
+ ],
+ HostEnvironmentInfo.GetCurrent(),
+ "",
+ "",
+ TimeSpan.Zero,
+ CultureInfo.InvariantCulture,
+ ImmutableArray<ValidationError>.Empty,
+ ImmutableArray<IColumnHidingRule>.Empty);
+ var logger = new AccumulationLogger();
+
+ OpenMetricsExporter.Default.ExportToLog(summary, logger);
+
+ var settings = VerifyHelper.Create();
+ return Verifier.Verify(logger.GetLog(), settings);
+ }
+
+ [Fact]
+ public Task LabelsAreEscapedCorrectly()
+ {
+ var summary = new Summary(
+ "",
+ [
+ new BenchmarkReport(
+ true,
+ new BenchmarkCase(
+ new Descriptor(MockFactory.MockType, MockFactory.MockMethodInfo),
+ Job.Dry,
+ new ParameterInstances(ImmutableArray<ParameterInstance>.Empty),
+ ImmutableConfigBuilder.Create(new ManualConfig())),
+ null,
+ null,
+ [
+ new ExecuteResult([
+ new Measurement(0, IterationMode.Workload, IterationStage.Result, 1, 10, 1)
+ ])
+ ],
+ new List<Metric>
+ {
+ new(new FakeMetricDescriptor("label_with_underscore", "label with underscore"), 42.0),
+ new(new FakeMetricDescriptor("label_with-dash", "label with dash"), 84.0),
+ new(new FakeMetricDescriptor("label with space", "label with space"), 126.0),
+ new(new FakeMetricDescriptor("label.with.dot", "label with dot"), 168.0),
+ new(new FakeMetricDescriptor("label with special chars !@#$%^&*()", "label with special chars !@#$%^&*()"), 210.0),
+ new(new FakeMetricDescriptor("label with special !@#$%^&*() chars", "label with special !@#$%^&*() chars"), 210.0),
+ new(new FakeMetricDescriptor("label with special !@#$%^&*()chars in the middle", "label with special !@#$%^&*()chars in the middle"), 210.0)
+ })
+ ],
+ HostEnvironmentInfo.GetCurrent(),
+ "",
+ "",
+ TimeSpan.Zero,
+ CultureInfo.InvariantCulture,
+ ImmutableArray<ValidationError>.Empty,
+ ImmutableArray<IColumnHidingRule>.Empty);
+ var logger = new AccumulationLogger();
+
+ OpenMetricsExporter.Default.ExportToLog(summary, logger);
+
+ var settings = VerifyHelper.Create();
+ return Verifier.Verify(logger.GetLog(), settings);
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_Invariant.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_Invariant.verified.txt
index b406e20dd1..068a5c521a 100644
--- a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_Invariant.verified.txt
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_Invariant.verified.txt
@@ -586,6 +586,55 @@ MarkdownExporter-stackoverflow
Foo | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
Bar | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
############################################
+OpenMetricsExporter
+############################################
+# HELP benchmark_cachemisses Additional metric CacheMisses
+# TYPE benchmark_cachemisses gauge
+benchmark_cachemisses{method="Foo", type="MockBenchmarkClass"} 7
+benchmark_cachemisses{method="Bar", type="MockBenchmarkClass"} 7
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_execution_time_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p90_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p95_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# EOF
+############################################
PlainExporter
############################################
*** MockBenchmarkClass.Foo: LongRun(IterationCount=100, LaunchCount=3, WarmupCount=15) ***
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_en-US.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_en-US.verified.txt
index b406e20dd1..068a5c521a 100644
--- a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_en-US.verified.txt
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_en-US.verified.txt
@@ -586,6 +586,55 @@ MarkdownExporter-stackoverflow
Foo | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
Bar | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
############################################
+OpenMetricsExporter
+############################################
+# HELP benchmark_cachemisses Additional metric CacheMisses
+# TYPE benchmark_cachemisses gauge
+benchmark_cachemisses{method="Foo", type="MockBenchmarkClass"} 7
+benchmark_cachemisses{method="Bar", type="MockBenchmarkClass"} 7
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_execution_time_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p90_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p95_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# EOF
+############################################
PlainExporter
############################################
*** MockBenchmarkClass.Foo: LongRun(IterationCount=100, LaunchCount=3, WarmupCount=15) ***
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_ru-RU.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_ru-RU.verified.txt
index b406e20dd1..068a5c521a 100644
--- a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_ru-RU.verified.txt
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/CommonExporterVerifyTests.Exporters_ru-RU.verified.txt
@@ -586,6 +586,55 @@ MarkdownExporter-stackoverflow
Foo | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
Bar | 1.000 ns | 0.000 ns | 0.000 ns | 1.000 ns | 7 |
############################################
+OpenMetricsExporter
+############################################
+# HELP benchmark_cachemisses Additional metric CacheMisses
+# TYPE benchmark_cachemisses gauge
+benchmark_cachemisses{method="Foo", type="MockBenchmarkClass"} 7
+benchmark_cachemisses{method="Bar", type="MockBenchmarkClass"} 7
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_execution_time_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Bar", type="MockBenchmarkClass"} 0
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p90_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", type="MockBenchmarkClass"} 1
+benchmark_p95_nanoseconds{method="Bar", type="MockBenchmarkClass"} 1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Bar", type="MockBenchmarkClass"} 0
+# EOF
+############################################
PlainExporter
############################################
*** MockBenchmarkClass.Foo: LongRun(IterationCount=100, LaunchCount=3, WarmupCount=15) ***
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.LabelsAreEscapedCorrectly.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.LabelsAreEscapedCorrectly.verified.txt
new file mode 100644
index 0000000000..a6c58c8799
--- /dev/null
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.LabelsAreEscapedCorrectly.verified.txt
@@ -0,0 +1,51 @@
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", type="MockBenchmarkClass"} 0
+# HELP benchmark_label_with_dash Additional metric label_with-dash
+# TYPE benchmark_label_with_dash gauge
+benchmark_label_with_dash{method="Foo", type="MockBenchmarkClass"} 84
+# HELP benchmark_label_with_dot Additional metric label.with.dot
+# TYPE benchmark_label_with_dot gauge
+benchmark_label_with_dot{method="Foo", type="MockBenchmarkClass"} 168
+# HELP benchmark_label_with_space Additional metric label with space
+# TYPE benchmark_label_with_space gauge
+benchmark_label_with_space{method="Foo", type="MockBenchmarkClass"} 126
+# HELP benchmark_label_with_special_chars Additional metric label with special chars !@#$%^&*()
+# TYPE benchmark_label_with_special_chars gauge
+benchmark_label_with_special_chars{method="Foo", type="MockBenchmarkClass"} 210
+# HELP benchmark_label_with_special_chars_in_the_middle Additional metric label with special !@#$%^&*()chars in the middle
+# TYPE benchmark_label_with_special_chars_in_the_middle gauge
+benchmark_label_with_special_chars_in_the_middle{method="Foo", type="MockBenchmarkClass"} 210
+# HELP benchmark_label_with_underscore Additional metric label_with_underscore
+# TYPE benchmark_label_with_underscore gauge
+benchmark_label_with_underscore{method="Foo", type="MockBenchmarkClass"} 42
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", type="MockBenchmarkClass"} 0
+# EOF
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.ParametrizedBenchmarks_LabelExpansion.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.ParametrizedBenchmarks_LabelExpansion.verified.txt
new file mode 100644
index 0000000000..1a46db1490
--- /dev/null
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.ParametrizedBenchmarks_LabelExpansion.verified.txt
@@ -0,0 +1,56 @@
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0.1
+benchmark_execution_time_nanoseconds{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0.1
+benchmark_execution_time_nanoseconds{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_label Additional metric label
+# TYPE benchmark_label gauge
+benchmark_label{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 42
+benchmark_label{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 42
+benchmark_label{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 42
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0.1
+benchmark_p90_nanoseconds{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0.1
+benchmark_p90_nanoseconds{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0.1
+benchmark_p95_nanoseconds{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0.1
+benchmark_p95_nanoseconds{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", param1="value1", param2="value1", param3="value1", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Foo", param1="value2", param2="value2", param3="value2", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Foo", param1="value3", param2="value3", param3="value3", type="MockBenchmarkClass"} 0
+# EOF
diff --git a/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.SingleBenchmark_ProducesHelpAndTypeOnce.verified.txt b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.SingleBenchmark_ProducesHelpAndTypeOnce.verified.txt
new file mode 100644
index 0000000000..0312b809fd
--- /dev/null
+++ b/tests/BenchmarkDotNet.Tests/Exporters/VerifiedFiles/OpenMetricsExporterTests.SingleBenchmark_ProducesHelpAndTypeOnce.verified.txt
@@ -0,0 +1,56 @@
+# HELP benchmark_error_nanoseconds Standard error of the mean execution time in nanoseconds.
+# TYPE benchmark_error_nanoseconds gauge
+# UNIT benchmark_error_nanoseconds nanoseconds
+benchmark_error_nanoseconds{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_error_nanoseconds{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_execution_time_nanoseconds Mean execution time in nanoseconds.
+# TYPE benchmark_execution_time_nanoseconds gauge
+# UNIT benchmark_execution_time_nanoseconds nanoseconds
+benchmark_execution_time_nanoseconds{method="Foo", param1="value1", type="MockBenchmarkClass"} 0.1
+benchmark_execution_time_nanoseconds{method="Foo", param1="value2", type="MockBenchmarkClass"} 0.1
+benchmark_execution_time_nanoseconds{method="Foo", param1="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_gc_gen0_collections_total Total number of Gen 0 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen0_collections_total counter
+benchmark_gc_gen0_collections_total{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen0_collections_total{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen1_collections_total Total number of Gen 1 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen1_collections_total counter
+benchmark_gc_gen1_collections_total{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen1_collections_total{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_gen2_collections_total Total number of Gen 2 garbage collections during the benchmark execution.
+# TYPE benchmark_gc_gen2_collections_total counter
+benchmark_gc_gen2_collections_total{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_gen2_collections_total{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_gc_total_operations_total Total number of garbage collection operations during the benchmark execution.
+# TYPE benchmark_gc_total_operations_total counter
+benchmark_gc_total_operations_total{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_gc_total_operations_total{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# HELP benchmark_label Additional metric label
+# TYPE benchmark_label gauge
+benchmark_label{method="Foo", param1="value1", type="MockBenchmarkClass"} 42
+benchmark_label{method="Foo", param1="value2", type="MockBenchmarkClass"} 42
+benchmark_label{method="Foo", param1="value3", type="MockBenchmarkClass"} 42
+# HELP benchmark_p90_nanoseconds 90th percentile execution time in nanoseconds.
+# TYPE benchmark_p90_nanoseconds gauge
+# UNIT benchmark_p90_nanoseconds nanoseconds
+benchmark_p90_nanoseconds{method="Foo", param1="value1", type="MockBenchmarkClass"} 0.1
+benchmark_p90_nanoseconds{method="Foo", param1="value2", type="MockBenchmarkClass"} 0.1
+benchmark_p90_nanoseconds{method="Foo", param1="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_p95_nanoseconds 95th percentile execution time in nanoseconds.
+# TYPE benchmark_p95_nanoseconds gauge
+# UNIT benchmark_p95_nanoseconds nanoseconds
+benchmark_p95_nanoseconds{method="Foo", param1="value1", type="MockBenchmarkClass"} 0.1
+benchmark_p95_nanoseconds{method="Foo", param1="value2", type="MockBenchmarkClass"} 0.1
+benchmark_p95_nanoseconds{method="Foo", param1="value3", type="MockBenchmarkClass"} 0.1
+# HELP benchmark_stddev_nanoseconds Standard deviation of execution time in nanoseconds.
+# TYPE benchmark_stddev_nanoseconds gauge
+# UNIT benchmark_stddev_nanoseconds nanoseconds
+benchmark_stddev_nanoseconds{method="Foo", param1="value1", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Foo", param1="value2", type="MockBenchmarkClass"} 0
+benchmark_stddev_nanoseconds{method="Foo", param1="value3", type="MockBenchmarkClass"} 0
+# EOF