diff --git a/.kno/chunk_review.txt b/.kno/chunk_review.txt new file mode 100644 index 000000000..1ba280631 --- /dev/null +++ b/.kno/chunk_review.txt @@ -0,0 +1,4678 @@ + +=== File: exporter_test.go === + +-- Chunk 1 -- +// exporter_test.go:12-53 +func TestParsePositiveDuration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + testInput string + want positiveDuration + wantErr bool + }{ + { + "ParsePositiveDuration returns a positiveDuration", + "15ms", + positiveDuration{15 * time.Millisecond}, + false, + }, + { + "ParsePositiveDuration returns error for trying to parse negative value", + "-15ms", + positiveDuration{}, + true, + }, + { + "ParsePositiveDuration returns error for trying to parse empty string", + "", + positiveDuration{}, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got, err := parsePositiveDuration(tt.testInput) + if (err != nil) != tt.wantErr { + t.Errorf("parsePositiveDuration() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("parsePositiveDuration() = %v, want %v", got, tt.want) + } + }) + } +} + +-- Chunk 2 -- +// exporter_test.go:55-110 +func TestParseUnixSocketAddress(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + testInput string + wantSocketPath string + wantRequestPath string + wantErr bool + }{ + { + "Normal unix socket address", + "unix:/path/to/socket", + "/path/to/socket", + "", + false, + }, + { + "Normal unix socket address with location", + "unix:/path/to/socket:/with/location", + "/path/to/socket", + "/with/location", + false, + }, + { + "Unix socket address with trailing ", + "unix:/trailing/path:", + "/trailing/path", + "", + false, + }, + { + "Unix socket address with too many colons", + "unix:/too:/many:colons:", + "", + "", + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + socketPath, requestPath, err := 
parseUnixSocketAddress(tt.testInput) + if (err != nil) != tt.wantErr { + t.Errorf("parseUnixSocketAddress() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(socketPath, tt.wantSocketPath) { + t.Errorf("socket path: parseUnixSocketAddress() = %v, want %v", socketPath, tt.wantSocketPath) + } + if !reflect.DeepEqual(requestPath, tt.wantRequestPath) { + t.Errorf("request path: parseUnixSocketAddress() = %v, want %v", requestPath, tt.wantRequestPath) + } + }) + } +} + +-- Chunk 3 -- +// exporter_test.go:112-142 +func TestAddMissingEnvironmentFlags(t *testing.T) { + t.Parallel() + expectedMatches := map[string]string{ + "non-matching-flag": "", + "web.missing-env": "MISSING_ENV", + "web.has-env": "HAS_ENV_ALREADY", + "web.listen-address": "LISTEN_ADDRESS", + "web.config.file": "CONFIG_FILE", + } + kingpinflag.AddFlags(kingpin.CommandLine, ":9113") + kingpin.Flag("non-matching-flag", "").String() + kingpin.Flag("web.missing-env", "").String() + kingpin.Flag("web.has-env", "").Envar("HAS_ENV_ALREADY").String() + addMissingEnvironmentFlags(kingpin.CommandLine) + + // using Envar() on a flag returned from GetFlag() + // adds an additional flag, which is processed correctly + // at runtime but means that we need to check for a match + // instead of checking the envar of each matching flag name + for k, v := range expectedMatches { + matched := false + for _, f := range kingpin.CommandLine.Model().Flags { + if f.Name == k && f.Envar == v { + matched = true + } + } + if !matched { + t.Errorf("missing %s envar for %s", v, k) + } + } +} + +-- Chunk 4 -- +// exporter_test.go:144-170 +func TestConvertFlagToEnvar(t *testing.T) { + t.Parallel() + cases := []struct { + input string + output string + }{ + { + input: "dot.separate", + output: "DOT_SEPARATE", + }, + { + input: "underscore_separate", + output: "UNDERSCORE_SEPARATE", + }, + { + input: "mixed_separate_options", + output: "MIXED_SEPARATE_OPTIONS", + }, + } + + for _, c := range cases { + res := 
convertFlagToEnvar(c.input) + if res != c.output { + t.Errorf("expected %s to resolve to %s but got %s", c.input, c.output, res) + } + } +} + +=== File: .pre-commit-config.yaml === + +-- Chunk 1 -- +// /app/repos/repo_9/.pre-commit-config.yaml:1-56 +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + args: [--allow-multiple-documents] + - id: check-ast + - id: check-added-large-files + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-executables-have-shebangs + - id: check-symlinks + - id: check-case-conflict + - id: check-vcs-permalinks + - id: check-json + - id: pretty-format-json + args: [--autofix, --no-ensure-ascii] + - id: mixed-line-ending + args: [--fix=lf] + - id: no-commit-to-branch + - id: requirements-txt-fixer + - id: fix-byte-order-marker + + - repo: https://github.com/golangci/golangci-lint + rev: v2.1.5 + hooks: + - id: golangci-lint-full + + - repo: https://github.com/gitleaks/gitleaks + rev: v8.24.3 + hooks: + - id: gitleaks + + - repo: https://github.com/DavidAnson/markdownlint-cli2 + rev: v0.17.2 + hooks: + - id: markdownlint-cli2 + + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.37.0 + hooks: + - id: yamllint + + - repo: https://github.com/thlorenz/doctoc + rev: v2.2.0 + hooks: + - id: doctoc + args: [--update-only, --title, "## Table of Contents"] + +ci: + skip: [golangci-lint-full] + autoupdate_schedule: quarterly # We use renovate for more frequent updates and there's no way to disable autoupdate + +=== File: exporter.go === + +-- Chunk 1 -- +// exporter.go:36-36 +type positiveDuration struct{ time.Duration } + +-- Chunk 2 -- +// exporter.go:38-46 +func (pd *positiveDuration) Set(s string) error { + dur, err := parsePositiveDuration(s) + if err != nil { + return err + } + + 
pd.Duration = dur.Duration + return nil +} + +-- Chunk 3 -- +// exporter.go:48-57 +func parsePositiveDuration(s string) (positiveDuration, error) { + dur, err := time.ParseDuration(s) + if err != nil { + return positiveDuration{}, fmt.Errorf("failed to parse duration %q: %w", s, err) + } + if dur < 0 { + return positiveDuration{}, fmt.Errorf("negative duration %v is not valid", dur) + } + return positiveDuration{dur}, nil +} + +-- Chunk 4 -- +// exporter.go:59-63 +func createPositiveDurationFlag(s kingpin.Settings) (target *time.Duration) { + target = new(time.Duration) + s.SetValue(&positiveDuration{Duration: *target}) + return +} + +-- Chunk 5 -- +// exporter.go:65-79 +func parseUnixSocketAddress(address string) (string, string, error) { + addressParts := strings.Split(address, ":") + addressPartsLength := len(addressParts) + + if addressPartsLength > 3 || addressPartsLength < 1 { + return "", "", errors.New("address for unix domain socket has wrong format") + } + + unixSocketPath := addressParts[1] + requestPath := "" + if addressPartsLength == 3 { + requestPath = addressParts[2] + } + return unixSocketPath, requestPath, nil +} + +-- Chunk 6 -- +// exporter.go:100-221 +func main() { + kingpin.Flag("prometheus.const-label", "Label that will be used in every metric. Format is label=value. 
It can be repeated multiple times.").Envar("CONST_LABELS").StringMapVar(&constLabels) + + // convert deprecated flags to new format + for i, arg := range os.Args { + if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") && len(arg) > 2 { + newArg := "-" + arg + fmt.Printf("the flag format is deprecated and will be removed in a future release, please use the new format: %s\n", newArg) + os.Args[i] = newArg + } + } + + config := &promslog.Config{} + + flag.AddFlags(kingpin.CommandLine, config) + kingpin.Version(common_version.Print(exporterName)) + kingpin.HelpFlag.Short('h') + + addMissingEnvironmentFlags(kingpin.CommandLine) + + kingpin.Parse() + logger := promslog.New(config) + + logger.Info("nginx-prometheus-exporter", "version", common_version.Info()) + logger.Info("build context", "build_context", common_version.BuildContext()) + + prometheus.MustRegister(version.NewCollector(exporterName)) + + if len(*scrapeURIs) == 0 { + logger.Error("no scrape addresses provided") + os.Exit(1) + } + + // #nosec G402 + sslConfig := &tls.Config{InsecureSkipVerify: !*sslVerify} + if *sslCaCert != "" { + caCert, err := os.ReadFile(*sslCaCert) + if err != nil { + logger.Error("loading CA cert failed", "err", err.Error()) + os.Exit(1) + } + sslCaCertPool := x509.NewCertPool() + ok := sslCaCertPool.AppendCertsFromPEM(caCert) + if !ok { + logger.Error("parsing CA cert file failed.") + os.Exit(1) + } + sslConfig.RootCAs = sslCaCertPool + } + + if *sslClientCert != "" && *sslClientKey != "" { + clientCert, err := tls.LoadX509KeyPair(*sslClientCert, *sslClientKey) + if err != nil { + logger.Error("loading client certificate failed", "error", err.Error()) + os.Exit(1) + } + sslConfig.Certificates = []tls.Certificate{clientCert} + } + + transport := &http.Transport{ + TLSClientConfig: sslConfig, + } + + if len(*scrapeURIs) == 1 { + registerCollector(logger, transport, (*scrapeURIs)[0], constLabels) + } else { + for _, addr := range *scrapeURIs { + // add scrape URI to const 
labels + labels := maps.Clone(constLabels) + labels["addr"] = addr + + registerCollector(logger, transport, addr, labels) + } + } + + http.Handle(*metricsPath, promhttp.Handler()) + + if *metricsPath != "/" && *metricsPath != "" { + landingConfig := web.LandingConfig{ + Name: "NGINX Prometheus Exporter", + Description: "Prometheus Exporter for NGINX and NGINX Plus", + HeaderColor: "#039900", + Version: common_version.Info(), + Links: []web.LandingLinks{ + { + Address: *metricsPath, + Text: "Metrics", + }, + }, + } + landingPage, err := web.NewLandingPage(landingConfig) + if err != nil { + logger.Error("failed to create landing page", "error", err.Error()) + os.Exit(1) + } + http.Handle("/", landingPage) + } + + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill, syscall.SIGTERM) + defer cancel() + + srv := &http.Server{ + ReadHeaderTimeout: 5 * time.Second, + } + + go func() { + if err := web.ListenAndServe(srv, webConfig, logger); err != nil { + if errors.Is(err, http.ErrServerClosed) { + logger.Info("HTTP server closed", "error", err.Error()) + os.Exit(0) + } + logger.Error("HTTP server failed", "error", err.Error()) + os.Exit(1) + } + }() + + <-ctx.Done() + logger.Info("shutting down") + srvCtx, srvCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer srvCancel() + _ = srv.Shutdown(srvCtx) +} + +-- Chunk 7 -- +// exporter.go:223-261 +func registerCollector(logger *slog.Logger, transport *http.Transport, + addr string, labels map[string]string, +) { + if strings.HasPrefix(addr, "unix:") { + socketPath, requestPath, err := parseUnixSocketAddress(addr) + if err != nil { + logger.Error("parsing unix domain socket scrape address failed", "uri", addr, "error", err.Error()) + os.Exit(1) + } + + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", socketPath) + } + addr = "http://unix" + requestPath + } + + userAgent := fmt.Sprintf("NGINX-Prometheus-Exporter/v%v", 
common_version.Version) + + httpClient := &http.Client{ + Timeout: *timeout, + Transport: &userAgentRoundTripper{ + agent: userAgent, + rt: transport, + }, + } + + if *nginxPlus { + plusClient, err := plusclient.NewNginxClient(addr, plusclient.WithHTTPClient(httpClient)) + if err != nil { + logger.Error("could not create Nginx Plus Client", "error", err.Error()) + os.Exit(1) + } + variableLabelNames := collector.NewVariableLabelNames(nil, nil, nil, nil, nil, nil, nil) + prometheus.MustRegister(collector.NewNginxPlusCollector(plusClient, "nginxplus", variableLabelNames, labels, logger)) + } else { + ossClient := client.NewNginxClient(httpClient, addr) + prometheus.MustRegister(collector.NewNginxCollector(ossClient, "nginx", labels, logger)) + } +} + +-- Chunk 8 -- +// exporter.go:263-266 +type userAgentRoundTripper struct { + rt http.RoundTripper + agent string +} + +-- Chunk 9 -- +// exporter.go:268-276 +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + roundTrip, err := rt.rt.RoundTrip(req) + if err != nil { + return nil, fmt.Errorf("round trip failed: %w", err) + } + return roundTrip, nil +} + +-- Chunk 10 -- +// exporter.go:278-290 +func cloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + *r = *req // shallow clone + + // deep copy headers + r.Header = make(http.Header, len(req.Header)) + for key, values := range req.Header { + newValues := make([]string, len(values)) + copy(newValues, values) + r.Header[key] = newValues + } + return r +} + +-- Chunk 11 -- +// exporter.go:294-303 +func addMissingEnvironmentFlags(ka *kingpin.Application) { + for _, f := range ka.Model().Flags { + if strings.HasPrefix(f.Name, "web.") && f.Envar == "" { + retrievedFlag := ka.GetFlag(f.Name) + if retrievedFlag != nil { + retrievedFlag.Envar(convertFlagToEnvar(strings.TrimPrefix(f.Name, "web."))) + } + } + } +} + +-- Chunk 12 -- +// exporter.go:305-311 
+func convertFlagToEnvar(f string) string { + env := strings.ToUpper(f) + for _, s := range []string{"-", "."} { + env = strings.ReplaceAll(env, s, "_") + } + return env +} + +=== File: renovate.json === + +-- Chunk 1 -- +// /app/repos/repo_9/renovate.json:1-7 +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "github>nginx/k8s-common", + "schedule:earlyMondays" + ] +} + +=== File: .markdownlint-cli2.yaml === + +-- Chunk 1 -- +// /app/repos/repo_9/.markdownlint-cli2.yaml:1-18 +# Rule configuration. +# For rule descriptions and how to fix: https://github.com/DavidAnson/markdownlint/tree/main#rules--aliases +config: + ul-style: + style: dash + no-duplicate-heading: + siblings_only: true + line-length: + line_length: 120 + code_blocks: false + tables: false + +# Define glob expressions to ignore +ignores: + - ".github/" + +# Fix any fixable errors +fix: true + +=== File: SECURITY.md === + +-- Chunk 1 -- +// /app/repos/repo_9/SECURITY.md:1-18 +# Security Policy + +## Supported Versions + +We advise users to use the most recent release of the NGINX Prometheus Exporter. The commercial support is available for +NGINX Plus customers when the NGINX Prometheus Exporter is used with NGINX Ingress Controller. + +## Reporting a Vulnerability + +The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security +vulnerabilities. + +- If you’re an F5 customer with an active support contract, please contact [F5 Technical + Support](https://www.f5.com/services/support). +- If you aren’t an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 + product to the F5 Security Incident Response Team at + +For more information visit + +=== File: CODE_OF_CONDUCT.md === + +-- Chunk 1 -- +// /app/repos/repo_9/CODE_OF_CONDUCT.md:1-75 +# Code of Conduct + +This project and everyone participating in it is governed by this code. 
+ +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at + +[homepage]: https://www.contributor-covenant.org + +=== File: release-process.md === + +-- Chunk 1 -- +// /app/repos/repo_9/release-process.md:1-45 +# Release Process + +This document outlines the steps involved in the release process for the NGINX Prometheus Exporter. + + + +## Table of Contents + +- [Versioning](#versioning) +- [Release Planning and Development](#release-planning-and-development) +- [Releasing a New Version](#releasing-a-new-version) + + + +## Versioning + +The project follows [Semantic Versioning](https://semver.org/). + +## Release Planning and Development + +The features that will go into the next release are reflected in the +corresponding [milestone](https://github.com/nginx/nginx-prometheus-exporter/milestones). 
Refer to +the [Issue Lifecycle](/ISSUE_LIFECYCLE.md) document for information on issue creation and assignment to releases. + +## Releasing a New Version + +1. Create an issue to define and track release-related activities. Choose a title that follows the + format `Release X.Y.Z`. +2. Stop merging any new work into the main branch. +3. Check the release draft under the [GitHub releases](https://github.com/nginx/nginx-prometheus-exporter/releases) page + to ensure that everything is in order. +4. Create a PR to update the version in the `Makefile` and [README](README.md) to the new version, and any other necessary + changes. +5. Once the PR is merged, create a new release tag in the format `vX.Y.Z`: + + ```bash + git tag -a vX.Y.Z -m "Release vX.Y.Z" + git push origin vX.Y.Z + ``` + + As a result, the CI/CD pipeline will: + + - Build the Docker image and push it to the registries. + - Publish the release to Snapcraft, Homebrew, Scoop, and Nix. + - Create a GitHub release with the autogenerated changelog and artifacts attached. + +=== File: .codecov.yml === + +-- Chunk 1 -- +// /app/repos/repo_9/.codecov.yml:1-7 +coverage: + status: + project: + default: + informational: true +comment: + require_changes: true + +=== File: CONTRIBUTING.md === + +-- Chunk 1 -- +// /app/repos/repo_9/CONTRIBUTING.md:1-101 +# Contributing Guidelines + +The following is a set of guidelines for contributing to the NGINX Prometheus Exporter. We really appreciate that you +are considering contributing! 
+ +## Table Of Contents + + + +## Table of Contents + +- [Ask a Question](#ask-a-question) +- [Getting Started](#getting-started) + - [Project Structure](#project-structure) +- [Contributing](#contributing) + - [Report a Bug](#report-a-bug) + - [Suggest an Enhancement](#suggest-an-enhancement) + - [Open a Pull Request](#open-a-pull-request) + - [Issue lifecycle](#issue-lifecycle) + - [F5 Contributor License Agreement (CLA)](#f5-contributor-license-agreement-cla) +- [Style Guides](#style-guides) + - [Git Style Guide](#git-style-guide) + - [Go Style Guide](#go-style-guide) + + + +## Ask a Question + +To ask a question, please use [GitHub Discussions](https://github.com/nginx/nginx-prometheus-exporter/discussions). + +Please reserve GitHub issues for feature requests and bugs rather than general questions. + +## Getting Started + +Follow our [Getting Started Guide](README.md#getting-started) to get the NGINX Prometheus Exporter up and running. + +### Project Structure + +- This Prometheus Exporter is written in Go and supports both the open source NGINX software and NGINX Plus. +- We use [Go modules](https://github.com/golang/go/wiki/Modules) for managing dependencies. + +## Contributing + +### Report a Bug + +To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please +ensure the issue has not already been reported. + +### Suggest an Enhancement + +To suggest an enhancement, please create an issue on GitHub with the label `enhancement` using the available feature +issue template. + +### Open a Pull Request + +- Fork the repo, create a branch, submit a PR when your changes are tested and ready for review +- Fill in [our pull request template](.github/PULL_REQUEST_TEMPLATE.md) + +> **Note** +> +> If you’d like to implement a new feature, please consider creating a feature request issue first to start a discussion +> about the feature. 
+ +### Issue lifecycle + +- When an issue or PR is created, it will be triaged by the core development team and assigned a label to indicate the + type of issue it is (bug, feature request, etc) and to determine the milestone. Please see the [Issue + Lifecycle](ISSUE_LIFECYCLE.md) document for more information. + +### F5 Contributor License Agreement (CLA) + +F5 requires all external contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/.github/blob/main/CLA/cla-markdown.md)) +before any of their changes can be incorporated into an F5 Open Source repository. + +If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and +agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes +can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs. + +## Style Guides + +### Git Style Guide + +- Keep a clean, concise and meaningful git commit history on your branch, rebasing locally and squashing before + submitting a PR +- Follow the guidelines of writing a good commit message as described here + and summarized in the next few points + - In the subject line, use the present tense ("Add feature" not "Added feature") + - In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...") + - Limit the subject line to 72 characters or less + - Reference issues and pull requests liberally after the subject line + - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in + your text editor to write a good message instead of `git commit -am`) + +### Go Style Guide + +- Run `gofmt` over your code to automatically resolve a lot of style issues. Most editors support this running + automatically when saving a code file. +- Run `go lint` and `go vet` on your code too to catch any other issues. 
+- Follow this guide on some good practice and idioms for Go - +- To check for extra issues, install [golangci-lint](https://github.com/golangci/golangci-lint) and run `make lint` or + `golangci-lint run` + +=== File: ISSUE_LIFECYCLE.md === + +-- Chunk 1 -- +// /app/repos/repo_9/ISSUE_LIFECYCLE.md:1-53 +# Issue Lifecycle + +To ensure a balance between work carried out by the NGINX engineering team while encouraging community involvement on +this project, we use the following issue lifecycle. (Note: The issue *creator* refers to the community member that +created the issue. The issue *owner* refers to the NGINX team member that is responsible for managing the issue +lifecycle.) + +1. New issue created by community member. + +2. Assign issue owner: All new issues are assigned an owner on the NGINX engineering team. This owner shepherds the + issue through the subsequent stages in the issue lifecycle. + +3. Determine issue type: This is done with automation where possible, and manually by the owner where necessary. The + associated label is applied to the issue. + + Possible Issue Types: + + - `needs more info`: The owner should use the issue to request information from the creator. If we don't receive the + needed information within 7 days, automation closes the issue. + + - `bug`: The implementation of a feature is not correct. + + - `proposal`: Request for a change. This can be a new feature, tackling technical debt, documentation changes, or + improving existing features. + + - `question`: The owner converts the issue to a github discussion and engages the creator. + +4. Determine milestone: The owner, in collaboration with the wider team (PM & engineering), determines what milestone to + attach to an issue. 
Generally, milestones correspond to product releases - however there are two 'magic' milestones + with special meanings (not tied to a specific release): + + - Issues assigned to backlog: Our team is in favour of implementing the feature request/fixing the issue, however the + implementation is not yet assigned to a concrete release. If and when a `backlog` issue aligns well with our + roadmap, it will be scheduled for a concrete iteration. We review and update our roadmap at least once every + quarter. The `backlog` list helps us shape our roadmap, but it is not the only source of input. Therefore, some + `backlog` items may eventually be closed as `out of scope`, or relabelled as `backlog candidate` once it becomes + clear that they do not align with our evolving roadmap. + + - Issues assigned to `backlog candidate`: Our team does not intend to implement the feature/fix request described in + the issue and wants the community to weigh in before we make our final decision. + + `backlog` issues can be labeled by the owner as `help wanted` and/or `good first issue` as appropriate. + +5. Promotion of `backlog candidate` issue to `backlog` issue: If an issue labelled `backlog candidate` receives more + than 30 upvotes within 60 days, we promote the issue by applying the `backlog` label. While issues promoted in this + manner have not been committed to a particular release, we welcome PRs from the community on them. + + If an issue does not make our roadmap and has not been moved to a discussion, it is closed with the label `out of + scope`. The goal is to get every issue in the issues list to one of the following end states: + + - An assigned release. + - The `backlog` label. + - Closed as `out of scope`. 
+ +=== File: CHANGELOG.md === + +-- Chunk 1 -- +// /app/repos/repo_9/CHANGELOG.md:1-150 +# Changelog + +Starting with version 0.9.0 an automatically generated list of changes can be found on the [GitHub Releases page](https://github.com/nginx/nginx-prometheus-exporter/releases). + +## 0.8.0 + +CHANGES: + +- [103](https://github.com/nginx/nginx-prometheus-exporter/pull/103): Switch to `gcr.io/distroless/static` image. Use + a non-root user to run the exporter process by default. Thanks to [Alex SZAKALY](https://github.com/alex1989hu). +- Update Go version to 1.14 + +FIXES: + +- [99](https://github.com/nginx/nginx-prometheus-exporter/pull/99): Fix link to metrics path. Thanks to [Yoan Blanc](https://github.com/greut). +- [101](https://github.com/nginx/nginx-prometheus-exporter/pull/101): docs: fix dockerfile link. Thanks to [Eric Carboni](https://github.com/eric-hc). + +UPGRADE: + +- Use the 0.8.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.8.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.8.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R19 or newer. + +## 0.7.0 + +FEATURES: + +- [86](https://github.com/nginx/nginx-prometheus-exporter/pull/86): Implemented TLS client certificate + authentication. Thanks to [Fabian Lüpke](https://github.com/Fluepke). + +FIXES: + +- [96](https://github.com/nginx/nginx-prometheus-exporter/pull/96): Add const labels to upMetric. Thanks to [Robert + Toth](https://github.com/robert-toth). + +UPGRADE: + +- Use the 0.7.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.7.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.7.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R19 or newer. 
+ +## 0.6.0 + +FEATURES: + +- [77](https://github.com/nginx/nginx-prometheus-exporter/pull/77): Add constLabels support via cli arg/env variable. + +CHANGES: + +- Update alpine image. + +UPGRADE: + +- Use the 0.6.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.6.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.6.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R19 or newer. + +## 0.5.0 + +FEATURES: + +- [70](https://github.com/nginx/nginx-prometheus-exporter/pull/70): Set user agent on scrape requests to nginx. +- [68](https://github.com/nginx/nginx-prometheus-exporter/pull/68): Add ability to scrape and listen on unix domain + sockets. +- [64](https://github.com/nginx/nginx-prometheus-exporter/pull/64): Add location zone and resolver metric support. + +FIXES: + +- [73](https://github.com/nginx/nginx-prometheus-exporter/pull/73): Fix typo in stream_zone_sync_status_nodes_online + metric description. +- [71](https://github.com/nginx/nginx-prometheus-exporter/pull/71): Do not assume default datasource in Grafana + panels. +- [62](https://github.com/nginx/nginx-prometheus-exporter/pull/62): Set correct nginx_up query and instance variable + expression. + +UPGRADE: + +- Use the 0.5.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.5.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.5.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R19 or newer. + +## 0.4.2 + +FIXES: + +- [60](https://github.com/nginx/nginx-prometheus-exporter/pull/60): _Fix session metrics for stream server zones_. + Session metrics with a status of `4xx` or `5xx` are now correctly reported. Previously they were always reported as + `0`. 
+ +UPGRADE: + +- Use the 0.4.2 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.4.2` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.4.2). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R18 or newer. + +## 0.4.1 + +FIXES: + +- [55](https://github.com/nginx/nginx-prometheus-exporter/pull/55): Do not export zone sync metrics if they are not + reported by NGINX Plus. Previously, in such case, the metrics were exported with zero values. + +UPGRADE: + +- Use the 0.4.1 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.4.1` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.4.1). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R18 or newer. + +## 0.4.0 + +FEATURES: + +- [50](https://github.com/nginx/nginx-prometheus-exporter/pull/50): Add zone sync metrics support. +- [37](https://github.com/nginx/nginx-prometheus-exporter/pull/37): Implement a way to retry connection to NGINX if + it is unreachable. Add -nginx.retries for setting the number of retries and -nginx.retry-interval for setting the + interval between retries, both as cli-arguments. + +UPGRADE: + +- Use the 0.4.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.4.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.4.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R18 or newer. + +-- Chunk 2 -- +// /app/repos/repo_9/CHANGELOG.md:151-191 + +## 0.3.0 + +FEATURES: + +- [32](https://github.com/nginx/nginx-prometheus-exporter/pull/32): Add nginxexporter_build_info metric. +- [31](https://github.com/nginx/nginx-prometheus-exporter/pull/31): Implement nginx_up and nginxplus_up metrics. Add + -nginx.timeout cli argument for setting a timeout for scrapping metrics from NGINX or NGINX Plus. 
+ +UPGRADE: + +- Use the 0.3.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.3.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.3.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R14 or newer. + +## 0.2.0 + +FEATURES: + +- [16](https://github.com/nginx/nginx-prometheus-exporter/pull/16): Add stream metrics support. +- [13](https://github.com/nginx/nginx-prometheus-exporter/pull/13): Add a flag for controlling SSL verification of + NGINX stub_status/API endpoint. Thanks to [Raza Jhaveri](https://github.com/razaj92). +- [3](https://github.com/nginx/nginx-prometheus-exporter/pull/3): Support for environment variables. + +UPGRADE: + +- Use the 0.2.0 image from our DockerHub: `nginx/nginx-prometheus-exporter:0.2.0` +- Download the latest binaries from [GitHub releases page](https://github.com/nginx/nginx-prometheus-exporter/releases/tag/v0.2.0). + +COMPATIBILITY: + +- NGINX 0.1.18 or newer. +- NGINX Plus R14 or newer. + +## 0.1.0 + +- Initial release. 
+ +=== File: README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/README.md:1-150 + +[![OpenSSFScorecard](https://api.securityscorecards.dev/projects/github.com/nginx/nginx-prometheus-exporter/badge)](https://scorecard.dev/viewer/?uri=github.com/nginx/nginx-prometheus-exporter) +[![CI](https://github.com/nginx/nginx-prometheus-exporter/actions/workflows/ci.yml/badge.svg)](https://github.com/nginx/nginx-prometheus-exporter/actions/workflows/ci.yml) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgithub.com%2Fnginx%2Fnginx-prometheus-exporter.svg?type=shield)](https://app.fossa.com/projects/custom%2B5618%2Fgithub.com%2Fnginx%2Fnginx-prometheus-exporter?ref=badge_shield) +[![Go Report Card](https://goreportcard.com/badge/github.com/nginx/nginx-prometheus-exporter)](https://goreportcard.com/report/github.com/nginx/nginx-prometheus-exporter) +[![codecov](https://codecov.io/gh/nginx/nginx-prometheus-exporter/graph/badge.svg?token=J6Oz10LWy3)](https://codecov.io/gh/nginx/nginx-prometheus-exporter) +![GitHub all releases](https://img.shields.io/github/downloads/nginx/nginx-prometheus-exporter/total?logo=github) +![GitHub release (latest by SemVer)](https://img.shields.io/github/downloads/nginx/nginx-prometheus-exporter/latest/total?sort=semver&logo=github) +[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nginx/nginx-prometheus-exporter?logo=github&sort=semver)](https://github.com/nginx/nginx-prometheus-exporter/releases/latest) +[![nginx-prometheus-exporter](https://snapcraft.io/nginx-prometheus-exporter/badge.svg)](https://snapcraft.io/nginx-prometheus-exporter) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/nginx/nginx-prometheus-exporter?logo=go) +[![Docker Pulls](https://img.shields.io/docker/pulls/nginx/nginx-prometheus-exporter?logo=docker&logoColor=white)](https://hub.docker.com/r/nginx/nginx-prometheus-exporter) +![Docker Image Size (latest 
semver)](https://img.shields.io/docker/image-size/nginx/nginx-prometheus-exporter?logo=docker&logoColor=white&sort=semver) +[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) + +# NGINX Prometheus Exporter + +NGINX Prometheus exporter makes it possible to monitor NGINX or NGINX Plus using Prometheus. + + + +## Table of Contents + +- [Overview](#overview) +- [Getting Started](#getting-started) + - [A Note about NGINX Ingress Controller](#a-note-about-nginx-ingress-controller) + - [Prerequisites](#prerequisites) + - [Running the Exporter in a Docker Container](#running-the-exporter-in-a-docker-container) + - [Running the Exporter Binary](#running-the-exporter-binary) +- [Usage](#usage) + - [Command-line Arguments](#command-line-arguments) +- [Exported Metrics](#exported-metrics) + - [Common metrics](#common-metrics) + - [Metrics for NGINX OSS](#metrics-for-nginx-oss) + - [Stub status metrics](#stub-status-metrics) + - [Metrics for NGINX Plus](#metrics-for-nginx-plus) + - [Connections](#connections) + - [HTTP](#http) + - [SSL](#ssl) + - [HTTP Server Zones](#http-server-zones) + - [Stream Server Zones](#stream-server-zones) + - [HTTP Upstreams](#http-upstreams) + - [Stream Upstreams](#stream-upstreams) + - [Stream Zone Sync](#stream-zone-sync) + - [Location Zones](#location-zones) + - [Resolver](#resolver) + - [HTTP Requests Rate Limiting](#http-requests-rate-limiting) + - [HTTP Connections Limiting](#http-connections-limiting) + - [Stream Connections Limiting](#stream-connections-limiting) + - [Cache](#cache) + - [Worker](#worker) +- [Troubleshooting](#troubleshooting) +- [Releases](#releases) + - [Docker images](#docker-images) + - [Binaries](#binaries) + - [Homebrew](#homebrew) + - [Snap](#snap) + - [Scoop](#scoop) + - [Nix](#nix) +- [Building the Exporter](#building-the-exporter) + - [Building the Docker 
Image](#building-the-docker-image) + - [Building the Binary](#building-the-binary) +- [Grafana Dashboard](#grafana-dashboard) +- [SBOM (Software Bill of Materials)](#sbom-software-bill-of-materials) + - [Binaries](#binaries-1) + - [Docker Image](#docker-image) +- [Provenance](#provenance) +- [Contacts](#contacts) +- [Contributing](#contributing) +- [Support](#support) +- [License](#license) + + + +## Overview + +[NGINX](https://nginx.org) exposes a handful of metrics via the [stub_status +page](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html#stub_status). [NGINX +Plus](https://www.nginx.com/products/nginx/) provides a richer set of metrics via the +[API](https://nginx.org/en/docs/http/ngx_http_api_module.html) and the [monitoring +dashboard](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/). NGINX Prometheus exporter +fetches the metrics from a single NGINX or NGINX Plus, converts the metrics into appropriate Prometheus metrics types +and finally exposes them via an HTTP server to be collected by [Prometheus](https://prometheus.io/). + +## Getting Started + +In this section, we show how to quickly run NGINX Prometheus Exporter for NGINX or NGINX Plus. + +### A Note about NGINX Ingress Controller + +If you’d like to use the NGINX Prometheus Exporter with [NGINX Ingress +Controller](https://github.com/nginx/kubernetes-ingress/) for Kubernetes, see [this +doc](https://docs.nginx.com/nginx-ingress-controller/logging-and-monitoring/prometheus/) for the installation +instructions. + +### Prerequisites + +We assume that you have already installed Prometheus and NGINX or NGINX Plus. Additionally, you need to: + +- Expose the built-in metrics in NGINX/NGINX Plus: + - For NGINX, expose the [stub_status + page](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html#stub_status) at `/stub_status` on port `8080`. 
+ - For NGINX Plus, expose the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html#api) at `/api` on port
+ `8080`.
+- Configure Prometheus to scrape metrics from the server with the exporter. Note that the default scrape port of the
+ exporter is `9113` and the default metrics path -- `/metrics`.
+
+### Running the Exporter in a Docker Container
+
+To start the exporter we use the [docker run](https://docs.docker.com/engine/reference/run/) command.
+
+- To export NGINX metrics, run:
+
+ ```console
+ docker run -p 9113:9113 nginx/nginx-prometheus-exporter:1.4.2 --nginx.scrape-uri=http://<nginx>:8080/stub_status
+ ```
+
+ where `<nginx>` is the IP address/DNS name, through which NGINX is available.
+
+- To export NGINX Plus metrics, run:
+
+ ```console
+ docker run -p 9113:9113 nginx/nginx-prometheus-exporter:1.4.2 --nginx.plus --nginx.scrape-uri=http://<nginx-plus>:8080/api
+ ```
+
+ where `<nginx-plus>` is the IP address/DNS name, through which NGINX Plus is available.
+
+### Running the Exporter Binary
+
+- To export NGINX metrics, run:
+
+ ```console
+ nginx-prometheus-exporter --nginx.scrape-uri=http://<nginx>:8080/stub_status
+ ```
+
+ where `<nginx>` is the IP address/DNS name, through which NGINX is available.
+
+- To export NGINX Plus metrics:
+
+ ```console
+ nginx-prometheus-exporter --nginx.plus --nginx.scrape-uri=http://<nginx-plus>:8080/api
+ ```
+
+ where `<nginx-plus>` is the IP address/DNS name, through which NGINX Plus is available.
+
+- To scrape NGINX metrics with unix domain sockets, run:
+
+ ```console
+ nginx-prometheus-exporter --nginx.scrape-uri=unix:<path_to_socket>:/stub_status
+ ```
+
+-- Chunk 2 --
+// /app/repos/repo_9/README.md:151-300
+
+ where `<path_to_socket>` is the path to unix domain socket, through which NGINX stub status is available.
+
+**Note**. The `nginx-prometheus-exporter` is not a daemon. To run the exporter as a system service (daemon), you can
+follow the example in [examples/systemd](./examples/systemd/README.md). Alternatively, you can run the exporter
+in a Docker container.
+
+## Usage
+
+### Command-line Arguments
+
+```console
+usage: nginx-prometheus-exporter [<flags>]
+
+
+Flags:
+ -h, --[no-]help Show context-sensitive help (also try --help-long and --help-man).
+ --[no-]web.systemd-socket Use systemd socket activation listeners instead
+ of port listeners (Linux only). ($SYSTEMD_SOCKET)
+ --web.listen-address=:9113 ...
+ Addresses on which to expose metrics and web interface. Repeatable for multiple addresses. ($LISTEN_ADDRESS)
+ --web.config.file="" Path to configuration file that can enable TLS or authentication. See: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md ($CONFIG_FILE)
+ --web.telemetry-path="/metrics"
+ Path under which to expose metrics. ($TELEMETRY_PATH)
+ --[no-]nginx.plus Start the exporter for NGINX Plus. By default, the exporter is started for NGINX. ($NGINX_PLUS)
+ --nginx.scrape-uri=http://127.0.0.1:8080/stub_status ...
+ A URI or unix domain socket path for scraping NGINX or NGINX Plus metrics. For NGINX, the stub_status page must be available through the URI. For NGINX Plus -- the API. Repeatable for multiple URIs. ($SCRAPE_URI)
+ --[no-]nginx.ssl-verify Perform SSL certificate verification. ($SSL_VERIFY)
+ --nginx.ssl-ca-cert="" Path to the PEM encoded CA certificate file used to validate the servers SSL certificate. ($SSL_CA_CERT)
+ --nginx.ssl-client-cert=""
+ Path to the PEM encoded client certificate file to use when connecting to the server. ($SSL_CLIENT_CERT)
+ --nginx.ssl-client-key="" Path to the PEM encoded client certificate key file to use when connecting to the server. ($SSL_CLIENT_KEY)
+ --nginx.timeout=5s A timeout for scraping metrics from NGINX or NGINX Plus. ($TIMEOUT)
+ --prometheus.const-label=PROMETHEUS.CONST-LABEL ...
+ Label that will be used in every metric. Format is label=value. It can be repeated multiple times. ($CONST_LABELS)
+ --log.level=info Only log messages with the given severity or above.
One of: [debug, info, warn, error] + --log.format=logfmt Output format of log messages. One of: [logfmt, json] + --[no-]version Show application version. +``` + +## Exported Metrics + +### Common metrics + +| Name | Type | Description | Labels | +| -------------------------------------------- | -------- | -------------------------------------------- | ------------------------------------------------------------------------- | +| `nginx_exporter_build_info` | Gauge | Shows the exporter build information. | `branch`, `goarch`, `goos`, `goversion`, `revision`, `tags` and `version` | +| `promhttp_metric_handler_requests_total` | Counter | Total number of scrapes by HTTP status code. | `code` (the HTTP status code) | +| `promhttp_metric_handler_requests_in_flight` | Gauge | Current number of scrapes being served. | [] | +| `go_*` | Multiple | Go runtime metrics. | [] | + +### Metrics for NGINX OSS + +| Name | Type | Description | Labels | +| ---------- | ----- | ------------------------------------------------------------------------------------------------ | ------ | +| `nginx_up` | Gauge | Shows the status of the last metric scrape: `1` for a successful scrape and `0` for a failed one | [] | + +#### [Stub status metrics](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html) + +| Name | Type | Description | Labels | +| ---------------------------- | ------- | ------------------------------------------------------------------- | ------ | +| `nginx_connections_accepted` | Counter | Accepted client connections. | [] | +| `nginx_connections_active` | Gauge | Active client connections. | [] | +| `nginx_connections_handled` | Counter | Handled client connections. | [] | +| `nginx_connections_reading` | Gauge | Connections where NGINX is reading the request header. | [] | +| `nginx_connections_waiting` | Gauge | Idle client connections. | [] | +| `nginx_connections_writing` | Gauge | Connections where NGINX is writing the response back to the client. 
| [] | +| `nginx_http_requests_total` | Counter | Total http requests. | [] | + +### Metrics for NGINX Plus + +| Name | Type | Description | Labels | +| -------------- | ----- | ------------------------------------------------------------------------------------------------ | ------ | +| `nginxplus_up` | Gauge | Shows the status of the last metric scrape: `1` for a successful scrape and `0` for a failed one | [] | + +#### [Connections](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_connections) + +| Name | Type | Description | Labels | +| -------------------------------- | ------- | ---------------------------------- | ------ | +| `nginxplus_connections_accepted` | Counter | Accepted client connections | [] | +| `nginxplus_connections_active` | Gauge | Active client connections | [] | +| `nginxplus_connections_dropped` | Counter | Dropped client connections dropped | [] | +| `nginxplus_connections_idle` | Gauge | Idle client connections | [] | + +#### [HTTP](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_) + +| Name | Type | Description | Labels | +| --------------------------------- | ------- | --------------------- | ------ | +| `nginxplus_http_requests_total` | Counter | Total http requests | [] | +| `nginxplus_http_requests_current` | Gauge | Current http requests | [] | + +#### [SSL](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_ssl_object) + +| Name | Type | Description | Labels | +| --------------------------------- | ------- | ----------------------------------- | ------ | +| `nginxplus_ssl_handshakes` | Counter | Successful SSL handshakes | [] | +| `nginxplus_ssl_handshakes_failed` | Counter | Failed SSL handshakes | [] | +| `nginxplus_ssl_session_reuses` | Counter | Session reuses during SSL handshake | [] | + +#### [HTTP Server Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) + +| Name | Type | Description | Labels | +| ---------------------------------------- 
| ------- | -------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `nginxplus_server_zone_processing` | Gauge | Client requests that are currently being processed | `server_zone` | +| `nginxplus_server_zone_requests` | Counter | Total client requests | `server_zone` | +| `nginxplus_server_zone_responses` | Counter | Total responses sent to clients | `code` (the response status code. The values are: `1xx`, `2xx`, `3xx`, `4xx` and `5xx`), `server_zone` | +| `nginxplus_server_zone_responses_codes` | Counter | Total responses sent to clients by code | `code` (the response status code. The possible values are [here](https://www.nginx.com/resources/wiki/extending/api/http/)), `server_zone` | +| `nginxplus_server_zone_discarded` | Counter | Requests completed without sending a response | `server_zone` | +| `nginxplus_server_zone_received` | Counter | Bytes received from clients | `server_zone` | +| `nginxplus_server_zone_sent` | Counter | Bytes sent to clients | `server_zone` | +| `nginxplus_server_ssl_handshakes` | Counter | Successful SSL handshakes | `server_zone` | +| `nginxplus_server_ssl_handshakes_failed` | Counter | Failed SSL handshakes | `server_zone` | +| `nginxplus_server_ssl_session_reuses` | Counter | Session reuses during SSL handshake | `server_zone` | + +#### [Stream Server Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone) + +| Name | Type | Description | Labels | +| ----------------------------------------------- | ------- | ----------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `nginxplus_stream_server_zone_processing` | Gauge | Client connections that are currently being processed | `server_zone` | +| `nginxplus_stream_server_zone_connections` | Counter | Total connections | 
`server_zone` | +| `nginxplus_stream_server_zone_sessions` | Counter | Total sessions completed | `code` (the response status code. The values are: `2xx`, `4xx`, and `5xx`), `server_zone` | +| `nginxplus_stream_server_zone_discarded` | Counter | Connections completed without creating a session | `server_zone` | +| `nginxplus_stream_server_zone_received` | Counter | Bytes received from clients | `server_zone` | +| `nginxplus_stream_server_zone_sent` | Counter | Bytes sent to clients | `server_zone` | +| `nginxplus_stream_server_ssl_handshakes` | Counter | Successful SSL handshakes | `server_zone` | +| `nginxplus_stream_server_ssl_handshakes_failed` | Counter | Failed SSL handshakes | `server_zone` | +| `nginxplus_stream_server_ssl_session_reuses` | Counter | Session reuses during SSL handshake | `server_zone` | + +#### [HTTP Upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) + +> Note: for the `state` metric, the string values are converted to float64 using the following rule: `"up"` -> `1.0`, +> `"draining"` -> `2.0`, `"down"` -> `3.0`, `"unavail"` –> `4.0`, `"checking"` –> `5.0`, `"unhealthy"` -> `6.0`. + +| Name | Type | Description | Labels | +| --------------------------------------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `nginxplus_upstream_server_state` | Gauge | Current state | `server`, `upstream` | +| `nginxplus_upstream_server_active` | Gauge | Active connections | `server`, `upstream` | +| `nginxplus_upstream_server_limit` | Gauge | Limit for connections which corresponds to the max_conns parameter of the upstream server. 
Zero value means there is no limit | `server`, `upstream` | +| `nginxplus_upstream_server_requests` | Counter | Total client requests | `server`, `upstream` | +| `nginxplus_upstream_server_responses` | Counter | Total responses sent to clients | `code` (the response status code. + +-- Chunk 3 -- +// /app/repos/repo_9/README.md:301-450 +| `nginxplus_upstream_server_ssl_handshakes` | Counter | Successful SSL handshakes | `server`, `upstream` | +| `nginxplus_upstream_server_ssl_handshakes_failed` | Counter | Failed SSL handshakes | `server`, `upstream` | +| `nginxplus_upstream_server_ssl_session_reuses` | Counter | Session reuses during SSL handshake | `server`, `upstream` | +| `nginxplus_upstream_keepalive` | Gauge | Idle keepalive connections | `upstream` | +| `nginxplus_upstream_zombies` | Gauge | Servers removed from the group but still processing active client requests | `upstream` | + +#### [Stream Upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream) + +> Note: for the `state` metric, the string values are converted to float64 using the following rule: `"up"` -> `1.0`, +> `"down"` -> `3.0`, `"unavail"` –> `4.0`, `"checking"` –> `5.0`, `"unhealthy"` -> `6.0`. + +| Name | Type | Description | Labels | +| ---------------------------------------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `nginxplus_stream_upstream_server_state` | Gauge | Current state | `server`, `upstream` | +| `nginxplus_stream_upstream_server_active` | Gauge | Active connections | `server` , `upstream` | +| `nginxplus_stream_upstream_server_limit` | Gauge | Limit for connections which corresponds to the max_conns parameter of the upstream server. 
Zero value means there is no limit | `server` , `upstream` | +| `nginxplus_stream_upstream_server_connections` | Counter | Total number of client connections forwarded to this server | `server`, `upstream` | +| `nginxplus_stream_upstream_server_connect_time` | Gauge | Average time to connect to the upstream server | `server`, `upstream` | +| `nginxplus_stream_upstream_server_first_byte_time` | Gauge | Average time to receive the first byte of data | `server`, `upstream` | +| `nginxplus_stream_upstream_server_response_time` | Gauge | Average time to receive the last byte of data | `server`, `upstream` | +| `nginxplus_stream_upstream_server_sent` | Counter | Bytes sent to this server | `server`, `upstream` | +| `nginxplus_stream_upstream_server_received` | Counter | Bytes received from this server | `server`, `upstream` | +| `nginxplus_stream_upstream_server_fails` | Counter | Number of unsuccessful attempts to communicate with the server | `server`, `upstream` | +| `nginxplus_stream_upstream_server_unavail` | Counter | How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold | `server`, `upstream` | +| `nginxplus_stream_upstream_server_health_checks_checks` | Counter | Total health check requests | `server`, `upstream` | +| `nginxplus_stream_upstream_server_health_checks_fails` | Counter | Failed health checks | `server`, `upstream` | +| `nginxplus_stream_upstream_server_health_checks_unhealthy` | Counter | How many times the server became unhealthy (state 'unhealthy') | `server`, `upstream` | +| `nginxplus_stream_upstream_server_ssl_handshakes` | Counter | Successful SSL handshakes | `server`, `upstream` | +| `nginxplus_stream_upstream_server_ssl_handshakes_failed` | Counter | Failed SSL handshakes | `server`, `upstream` | +| `nginxplus_stream_upstream_server_ssl_session_reuses` | Counter | Session reuses during SSL handshake | `server`, `upstream` | +| 
`nginxplus_stream_upstream_zombies` | Gauge | Servers removed from the group but still processing active client connections | `upstream` | + +#### [Stream Zone Sync](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_zone_sync) + +| Name | Type | Description | Labels | +| ------------------------------------------------- | ------- | ------------------------------------------------------------ | ------ | +| `nginxplus_stream_zone_sync_zone_records_pending` | Gauge | The number of records that need to be sent to the cluster | `zone` | +| `nginxplus_stream_zone_sync_zone_records_total` | Gauge | The total number of records stored in the shared memory zone | `zone` | +| `nginxplus_stream_zone_sync_zone_bytes_in` | Counter | Bytes received by this node | [] | +| `nginxplus_stream_zone_sync_zone_bytes_out` | Counter | Bytes sent by this node | [] | +| `nginxplus_stream_zone_sync_zone_msgs_in` | Counter | Total messages received by this node | [] | +| `nginxplus_stream_zone_sync_zone_msgs_out` | Counter | Total messages sent by this node | [] | +| `nginxplus_stream_zone_sync_zone_nodes_online` | Gauge | Number of peers this node is connected to | [] | + +#### [Location Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_location_zone) + +| Name | Type | Description | Labels | +| ----------------------------------------- | ------- | --------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +| `nginxplus_location_zone_requests` | Counter | Total client requests | `location_zone` | +| `nginxplus_location_zone_responses` | Counter | Total responses sent to clients | `code` (the response status code. 
The values are: `1xx`, `2xx`, `3xx`, `4xx` and `5xx`), `location_zone` | +| `nginxplus_location_zone_responses_codes` | Counter | Total responses sent to clients by code | `code` (the response status code. The possible values are [here](https://www.nginx.com/resources/wiki/extending/api/http/)), `location_zone` | +| `nginxplus_location_zone_discarded` | Counter | Requests completed without sending a response | `location_zone` | +| `nginxplus_location_zone_received` | Counter | Bytes received from clients | `location_zone` | +| `nginxplus_location_zone_sent` | Counter | Bytes sent to clients | `location_zone` | + +#### [Resolver](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_resolver_zone) + +| Name | Type | Description | Labels | +| ----------------------------- | ------- | ---------------------------------------------- | ---------- | +| `nginxplus_resolver_name` | Counter | Total requests to resolve names to addresses | `resolver` | +| `nginxplus_resolver_srv` | Counter | Total requests to resolve SRV records | `resolver` | +| `nginxplus_resolver_addr` | Counter | Total requests to resolve addresses to names | `resolver` | +| `nginxplus_resolver_noerror` | Counter | Total number of successful responses | `resolver` | +| `nginxplus_resolver_formerr` | Counter | Total number of FORMERR responses | `resolver` | +| `nginxplus_resolver_servfail` | Counter | Total number of SERVFAIL responses | `resolver` | +| `nginxplus_resolver_nxdomain` | Counter | Total number of NXDOMAIN responses | `resolver` | +| `nginxplus_resolver_notimp` | Counter | Total number of NOTIMP responses | `resolver` | +| `nginxplus_resolver_refused` | Counter | Total number of REFUSED responses | `resolver` | +| `nginxplus_resolver_timedout` | Counter | Total number of timed out request | `resolver` | +| `nginxplus_resolver_unknown` | Counter | Total requests completed with an unknown error | `resolver` | + +#### [HTTP Requests Rate 
Limiting](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_limit_req_zone) + +| Name | Type | Description | Labels | +| ------------------------------------------ | ------- | --------------------------------------------------------------------------- | ------ | +| `nginxplus_limit_request_passed` | Counter | Total number of requests that were neither limited nor accounted as limited | `zone` | +| `nginxplus_limit_request_rejected` | Counter | Total number of requests that were rejected | `zone` | +| `nginxplus_limit_request_delayed` | Counter | Total number of requests that were delayed | `zone` | +| `nginxplus_limit_request_rejected_dry_run` | Counter | Total number of requests accounted as rejected in the dry run mode | `zone` | +| `nginxplus_limit_request_delayed_dry_run` | Counter | Total number of requests accounted as delayed in the dry run mode | `zone` | + +#### [HTTP Connections Limiting](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_limit_conn_zone) + +| Name | Type | Description | Labels | +| --------------------------------------------- | ------- | ------------------------------------------------------------------------------ | ------ | +| `nginxplus_limit_connection_passed` | Counter | Total number of connections that were neither limited nor accounted as limited | `zone` | +| `nginxplus_limit_connection_rejected` | Counter | Total number of connections that were rejected | `zone` | +| `nginxplus_limit_connection_rejected_dry_run` | Counter | Total number of connections accounted as rejected in the dry run mode | `zone` | + +#### [Stream Connections Limiting](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_limit_conn_zone) + +| Name | Type | Description | Labels | +| ---------------------------------------------------- | ------- | ------------------------------------------------------------------------------ | ------ | +| `nginxplus_stream_limit_connection_passed` | Counter | Total 
number of connections that were neither limited nor accounted as limited | `zone` | +| `nginxplus_stream_limit_connection_rejected` | Counter | Total number of connections that were rejected | `zone` | +| `nginxplus_stream_limit_connection_rejected_dry_run` | Counter | Total number of connections accounted as rejected in the dry run mode | `zone` | + +#### [Cache](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_cache) + +| Name | Type | Description | Labels | +| ------------------------------------------- | ------- | ----------------------------------------------------------------------- | ------- | +| `nginxplus_cache_size` | Gauge | Total size of the cache | `cache` | +| `nginxplus_cache_max_size` | Gauge | Maximum size of the cache | `cache` | +| `nginxplus_cache_cold` | Gauge | Is the cache co + +-- Chunk 4 -- +// /app/repos/repo_9/README.md:451-600 +[Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-prometheus-exporter) and +[Quay.io](https://quay.io/repository/nginx/nginx-prometheus-exporter). + +As an alternative, you can choose the _edge_ version built from the [latest commit](https://github.com/nginx/nginx-prometheus-exporter/commits/main) +from the main branch. The edge version is useful for experimenting with new features that are not yet published in a +stable release. + +### Binaries + +We publish the binaries for multiple Operating Systems and architectures on the GitHub [releases page](https://github.com/nginx/nginx-prometheus-exporter/releases). + +### Homebrew + +You can add the NGINX homebrew tap with + +```console +brew tap nginx/tap +``` + +and then install the formula with + +```console +brew install nginx-prometheus-exporter +``` + +### Snap + +You can install the NGINX Prometheus Exporter from the [Snap Store](https://snapcraft.io/nginx-prometheus-exporter). 
+
+```console
+snap install nginx-prometheus-exporter
+```
+
+### Scoop
+
+You can add the NGINX Scoop bucket with
+
+```console
+scoop bucket add nginx https://github.com/nginx/scoop-bucket.git
+```
+
+and then install the package with
+
+```console
+scoop install nginx-prometheus-exporter
+```
+
+### Nix
+
+First include NUR in your packageOverrides as explained in the [NUR documentation](https://github.com/nix-community/NUR#installation).
+
+Then you can use the exporter with the following command:
+
+```console
+nix-shell --packages nur.repos.nginx.nginx-prometheus-exporter
+```
+
+or install it with:
+
+```console
+nix-env -f '<nixpkgs>' -iA nur.repos.nginx.nginx-prometheus-exporter
+```
+
+## Building the Exporter
+
+You can build the exporter using the provided Makefile. Before building the exporter, make sure the following software
+is installed on your machine:
+
+- make
+- git
+- Docker for building the container image
+- Go for building the binary
+
+### Building the Docker Image
+
+To build the Docker image with the exporter, run:
+
+```console
+make container
+```
+
+Note: go is not required, as the exporter binary is built in a Docker container. See the [Dockerfile](build/Dockerfile).
+
+### Building the Binary
+
+To build the binary, run:
+
+```console
+make
+```
+
+Note: the binary is built for the OS/arch of your machine. To build binaries for other platforms, see the
+[Makefile](Makefile).
+
+The binary is built with the name `nginx-prometheus-exporter`.
+
+## Grafana Dashboard
+
+The official Grafana dashboard is provided with the exporter for NGINX. Check the [Grafana
+Dashboard](./grafana/README.md) documentation for more information.
+
+## SBOM (Software Bill of Materials)
+
+We generate SBOMs for the binaries and the Docker image.
+
+### Binaries
+
+The SBOMs for the binaries are available in the releases page. The SBOMs are generated using
+[syft](https://github.com/anchore/syft) and are available in SPDX format.
+ +### Docker Image + +The SBOM for the Docker image is available in the +[DockerHub](https://hub.docker.com/r/nginx/nginx-prometheus-exporter), +[GitHub Container registry](https://github.com/nginx/nginx-prometheus-exporter/pkgs/container/nginx-prometheus-exporter), +[Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-prometheus-exporter) and +[Quay.io](https://quay.io/repository/nginx/nginx-prometheus-exporter) repositories. The SBOMs are generated using +[syft](https://github.com/anchore/syft) and stored as an attestation in the image manifest. + +For example to retrieve the SBOM for `linux/amd64` from Docker Hub and analyze it using +[grype](https://github.com/anchore/grype) you can run the following command: + +```console +docker buildx imagetools inspect nginx/nginx-prometheus-exporter:edge --format '{{ json (index .SBOM "linux/amd64").SPDX }}' | grype +``` + +## Provenance + +We generate provenance for the Docker image and it's available in the +[DockerHub](https://hub.docker.com/r/nginx/nginx-prometheus-exporter), +[GitHub Container registry](https://github.com/nginx/nginx-prometheus-exporter/pkgs/container/nginx-prometheus-exporter), +[Amazon ECR Public Gallery](https://gallery.ecr.aws/nginx/nginx-prometheus-exporter) and +[Quay.io](https://quay.io/repository/nginx/nginx-prometheus-exporter) repositories, stored as an attestation in the +image manifest. + +For example to retrieve the provenance for `linux/amd64` from Docker Hub you can run the following command: + +```console +docker buildx imagetools inspect nginx/nginx-prometheus-exporter:edge --format '{{ json (index .Provenance "linux/amd64").SLSA }}' +``` + +## Contacts + +We’d like to hear your feedback! If you have any suggestions or experience issues with the NGINX Prometheus Exporter, +please create an issue or send a pull request on GitHub. You can contact us on the [NGINX Community Forums](https://community.nginx.org/). 
+ +## Contributing + +If you'd like to contribute to the project, please read our [Contributing guide](CONTRIBUTING.md). + + +-- Chunk 5 -- +// /app/repos/repo_9/README.md:601-608 +## Support + +The commercial support is available for NGINX Plus customers when the NGINX Prometheus Exporter is used with NGINX +Ingress Controller. + +## License + +[Apache License, Version 2.0](LICENSE). + +=== File: .golangci.yml === + +-- Chunk 1 -- +// /app/repos/repo_9/.golangci.yml:1-122 +version: "2" +linters: + default: none + enable: + - asasalint + - asciicheck + - bidichk + - containedctx + - contextcheck + - copyloopvar + - dupword + - durationcheck + - errcheck + - errchkjson + - errname + - errorlint + - fatcontext + - forcetypeassert + - gocheckcompilerdirectives + - gochecksumtype + - gocritic + - godot + - gosec + - gosmopolitan + - govet + - ineffassign + - intrange + - loggercheck + - makezero + - mirror + - misspell + - musttag + - nilerr + - nilnil + - noctx + - nolintlint + - paralleltest + - perfsprint + - prealloc + - predeclared + - promlinter + - reassign + - revive + - sloglint + - staticcheck + - tagalign + - thelper + - tparallel + - unconvert + - unparam + - unused + - usestdlibvars + - wastedassign + - whitespace + - wrapcheck + settings: + govet: + enable-all: true + misspell: + locale: US + revive: + rules: + - name: blank-imports + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: superfluous-else + - name: time-naming + - name: 
unchecked-type-assertion + - name: unexported-return + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + sloglint: + static-msg: true + key-naming-case: snake + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gofmt + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ + +=== File: go.sum === + +-- Chunk 1 -- +// /app/repos/repo_9/go.sum:1-78 +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nginx/nginx-plus-go-client/v2 v2.4.0 h1:4c7V57CLCZUOxQCUcS9G8a5MClzdmxByBm+f4zKMzAY= +github.com/nginx/nginx-plus-go-client/v2 v2.4.0/go.mod 
h1:P+dIP2oKYzFoyf/zlLWQa8Sf+fHb+CclOKzxAjxpvug= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= +github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= +github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto 
v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + +=== File: .yamllint.yaml === + +-- Chunk 1 -- +// /app/repos/repo_9/.yamllint.yaml:1-18 +--- +ignore-from-file: .gitignore + +extends: default + +rules: + comments: + min-spaces-from-content: 1 + comments-indentation: enable + document-start: disable + empty-values: 
enable + line-length: + max: 120 + ignore: | + .goreleaser.yml + .github/ + truthy: + check-keys: false + +=== File: go.mod === + +-- Chunk 1 -- +// /app/repos/repo_9/go.mod:1-34 +module github.com/nginx/nginx-prometheus-exporter + +go 1.24.2 + +require ( + github.com/alecthomas/kingpin/v2 v2.4.0 + github.com/nginx/nginx-plus-go-client/v2 v2.4.0 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/common v0.63.0 + github.com/prometheus/exporter-toolkit v0.14.0 +) + +require ( + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) + +=== File: .editorconfig === + +-- Chunk 1 -- +// /app/repos/repo_9/.editorconfig:1-13 +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_size = 4 +indent_style = tab + +[*.{md,yml,yaml}] +indent_size = 2 +indent_style = space + +=== File: .goreleaser.yml === + +-- Chunk 1 -- +// /app/repos/repo_9/.goreleaser.yml:1-150 +version: 2 +before: + hooks: + - ./scripts/completions.sh + - 
./scripts/manpages.sh + +builds: + - env: + - CGO_ENABLED=0 + goos: + - darwin + - freebsd + - linux + - solaris + - windows + goarch: + - 386 + - amd64 + - arm + - arm64 + - mips64 + - mips64le + - ppc64 + - ppc64le + - riscv64 + - s390x + goarm: + - 5 + - 6 + - 7 + gomips: + - softfloat + ignore: + - goos: windows + goarch: arm + flags: + - -trimpath + ldflags: + - "-s -w -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.BuildDate={{.Date}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser=goreleaser" + +changelog: + disable: true + +archives: + - format_overrides: + - goos: windows + format: zip + files: + - README.md + - LICENSE + - completions/* + - manpages/* + +sboms: + - artifacts: archive + documents: + - "${artifact}.spdx.json" + +brews: + - repository: + owner: nginx + name: homebrew-tap + token: "{{ .Env.NGINX_GITHUB_TOKEN }}" + directory: Formula + homepage: https://github.com/nginx/nginx-prometheus-exporter + description: NGINX Prometheus Exporter for NGINX and NGINX Plus + license: Apache-2.0 + commit_author: + name: nginx-bot + email: integrations@nginx.com + extra_install: |- + bash_completion.install "completions/nginx-prometheus-exporter.bash" => "nginx-prometheus-exporter" + zsh_completion.install "completions/nginx-prometheus-exporter.zsh" => "_nginx-prometheus-exporter" + man1.install "manpages/nginx-prometheus-exporter.1.gz" + +signs: + - cmd: cosign + artifacts: checksum + output: true + certificate: "${artifact}.pem" + args: + - sign-blob + - "--output-signature=${signature}" + - "--output-certificate=${certificate}" + - "${artifact}" + - "--yes" + +milestones: + - close: true + +snapcrafts: + - name_template: "{{ .ProjectName }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + title: NGINX Prometheus Exporter + summary: NGINX Prometheus Exporter for NGINX and NGINX Plus + description: | + NGINX Prometheus exporter fetches the 
metrics from NGINX or NGINX Plus, + converts the metrics into appropriate Prometheus metrics types and finally exposes + them via an HTTP server to be collected by Prometheus. + grade: stable + confinement: strict + publish: true + license: "Apache-2.0" + apps: + nginx-prometheus-exporter: + command: nginx-prometheus-exporter + plugs: ["network", "network-bind"] + completer: completions/nginx-prometheus-exporter.bash + disable: "{{ if .IsSnapshot }}true{{ end }}" + +nix: + - name: nginx-prometheus-exporter + repository: + owner: nginx + name: nur + token: "{{ .Env.NGINX_GITHUB_TOKEN }}" + homepage: https://github.com/nginx/nginx-prometheus-exporter + description: NGINX Prometheus Exporter for NGINX and NGINX Plus + license: asl20 + commit_author: + name: nginx-bot + email: integrations@nginx.com + extra_install: |- + installManPage ./manpages/nginx-prometheus-exporter.1.gz + installShellCompletion ./completions/* + +winget: + - name: nginx-prometheus-exporter + publisher: nginx + license: Apache-2.0 + homepage: https://github.com/nginx/nginx-prometheus-exporter + short_description: NGINX Prometheus Exporter for NGINX and NGINX Plus + repository: + owner: nginx + name: winget-pkgs + token: "{{ .Env.NGINX_GITHUB_TOKEN }}" + branch: "nginx-prometheus-exporter-{{.Version}}" + pull_request: + enabled: true + draft: true + base: + owner: microsoft + name: winget-pkgs + branch: master + +scoops: + - repository: + owner: nginx + name: scoop-bucket + token: "{{ .Env.NGINX_GITHUB_TOKEN }}" + directory: bucket + +-- Chunk 2 -- +// /app/repos/repo_9/.goreleaser.yml:151-156 + homepage: https://github.com/nginx/nginx-prometheus-exporter + description: NGINX Prometheus Exporter for NGINX and NGINX Plus + license: Apache-2.0 + commit_author: + name: nginx-bot + email: integrations@nginx.com + +=== File: .gitignore === + +-- Chunk 1 -- +// /app/repos/repo_9/.gitignore:1-15 +# NGINX Plus license files +*.crt +*.key + +# Visual Studio Code settings +.vscode + +# the binary 
+nginx-prometheus-exporter +dist/ + +completions/ +manpages/ + +coverage.txt + +=== File: grafana/dashboard.json === + +-- Chunk 1 -- +// /app/repos/repo_9/grafana/dashboard.json:1-150 +{ + "__inputs": [ + { + "description": "", + "label": "Prometheus", + "name": "DS_PROMETHEUS", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "5.0.0" + }, + { + "id": "graph", + "name": "Graph", + "type": "panel", + "version": "" + }, + { + "id": "prometheus", + "name": "Prometheus", + "type": "datasource", + "version": "1.0.0" + }, + { + "id": "singlestat", + "name": "Singlestat", + "type": "panel", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Official dashboard for NGINX Prometheus exporter", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1562682051068, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "panels": [], + "title": "Status", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorPostfix": false, + "colorPrefix": false, + "colorValue": false, + "colors": [ + "#E02F44", + "#FF9830", + "#299c46" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": null, + "description": "", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to 
text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "repeat": "instance", + "repeatDirection": "h", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "nginx_up{instance=~\"$instance\"}", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "1,1", + "timeFrom": null, + "timeShift": null, + +-- Chunk 2 -- +// /app/repos/repo_9/grafana/dashboard.json:151-300 + "title": "NGINX Status for $instance", + "type": "singlestat", + "valueFontSize": "100%", + "valueMaps": [ + { + "op": "=", + "text": "Down", + "value": "0" + }, + { + "op": "=", + "text": "Up", + "value": "1" + } + ], + "valueName": "current" + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 6, + "panels": [], + "title": "Metrics", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": null, + "description": "", + "fill": 1, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": {}, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"irate(nginx_connections_accepted{instance=~\"$instance\"}[5m])", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{instance}} accepted", + "refId": "A" + }, + { + "expr": "irate(nginx_connections_handled{instance=~\"$instance\"}[5m])", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{instance}} handled", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Processed connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": "Connections (rate)", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "fill": 1, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + +-- Chunk 3 -- +// /app/repos/repo_9/grafana/dashboard.json:301-450 + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": {}, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "nginx_connections_active{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} active", + "refId": "A" + }, + { + 
"expr": "nginx_connections_reading{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} reading", + "refId": "B" + }, + { + "expr": "nginx_connections_waiting{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} waiting", + "refId": "C" + }, + { + "expr": "nginx_connections_writing{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} writing", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "Connections", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": {}, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(nginx_http_requests_total{instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}} total requests", + "refId": "A" + } + ], 
+ "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + +-- Chunk 4 -- +// /app/repos/repo_9/grafana/dashboard.json:451-567 + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 18, + "style": "dark", + "tags": [ + "nginx", + "prometheus", + "nginx prometheus exporter" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "tags": [], + "text": "default", + "value": "default" + }, + "hide": 0, + "includeAll": false, + "label": "datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(nginx_up, instance)", + "hide": 0, + "includeAll": true, + "label": "", + "multi": true, + "name": "instance", + "options": [], + "query": "label_values(nginx_up, instance)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "NGINX", + "uid": "MsjffzSZz", + "version": 1 +} 
+ +=== File: grafana/README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/grafana/README.md:1-52 +# Grafana Dashboard + +We provide the official Grafana dashboard that visualizes the NGINX metrics exposed by the exporter. The dashboard +allows you to filter metrics per instance or see the metrics from all instances. + + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Installing the Dashboard](#installing-the-dashboard) +- [Graphs](#graphs) + + + +## Prerequisites + +The dashboard has been tested with the following software versions: + +- NGINX Prometheus Exporter >= 0.4.1 +- Grafana >= v5.0.0 +- Prometheus >= v2.0.0 + +A Prometheus data source needs to be [added](https://prometheus.io/docs/visualization/grafana/#using) before installing +the dashboard. + +## Installing the Dashboard + +In the Grafana UI complete the following steps: + +1. Use the *New Dashboard* button and click *Import*. +2. Upload `dashboard.json` or copy and paste the contents of the file in the textbox and click *Load*. +3. Set the Prometheus data source and click *Import*. +4. The dashboard will appear. Note how you can filter the instance label just below the dashboard title (top left + corner). This allows you to filter metrics per instance. By default, all instances are selected. + +![dashboard](./dashboard.png) + +## Graphs + +The dashboard comes with 2 rows with the following graphs for NGINX metrics: + +- Status + - Up/Down graph per instance. It shows the `nginx_up` metric. +- Metrics + - Processed connections (`nginx_connections_accepted` and `nginx_connections_handled` metrics). This graph shows an + [irate](https://prometheus.io/docs/prometheus/latest/querying/functions/#irate) in a range of 5 minutes. Useful for + seeing the variation of the processed connections in time. + - Active connections (`nginx_connections_active`, `nginx_connections_reading`, `nginx_connections_waiting` and + `nginx_connections_writing`). Useful for checking what is happening right now. 
+ - Total Requests with an irate (5 minutes range too) of the total number of client requests + (`nginx_http_requests_total`) over time. + +=== File: client/nginx_test.go === + +-- Chunk 1 -- +// nginx_test.go:10-51 +func TestParseStubStatsValidInput(t *testing.T) { + t.Parallel() + + tests := []struct { + input []byte + expectedResult StubStats + expectedError bool + }{ + { + input: []byte(validStabStats), + expectedResult: StubStats{ + Connections: StubConnections{ + Active: 1457, + Accepted: 6717066, + Handled: 6717066, + Reading: 1, + Writing: 8, + Waiting: 1448, + }, + Requests: 65844359, + }, + expectedError: false, + }, + { + input: []byte("invalid-stats"), + expectedError: true, + }, + } + + for _, test := range tests { + r := bytes.NewReader(test.input) + result, err := parseStubStats(r) + + if err != nil && !test.expectedError { + t.Errorf("parseStubStats() returned error for valid input %q: %v", string(test.input), err) + } + + if !test.expectedError && test.expectedResult != *result { + t.Errorf("parseStubStats() result %v != expected %v for input %q", result, test.expectedResult, test.input) + } + } +} + +=== File: client/nginx.go === + +-- Chunk 1 -- +// nginx.go:18-21 +type NginxClient struct { + httpClient *http.Client + apiEndpoint string +} + +-- Chunk 2 -- +// nginx.go:24-27 +type StubStats struct { + Connections StubConnections + Requests int64 +} + +-- Chunk 3 -- +// nginx.go:30-37 +type StubConnections struct { + Active int64 + Accepted int64 + Handled int64 + Reading int64 + Writing int64 + Waiting int64 +} + +-- Chunk 4 -- +// nginx.go:40-47 +func NewNginxClient(httpClient *http.Client, apiEndpoint string) *NginxClient { + client := &NginxClient{ + apiEndpoint: apiEndpoint, + httpClient: httpClient, + } + + return client +} + +-- Chunk 5 -- +// nginx.go:50-80 +func (client *NginxClient) GetStubStats() (*StubStats, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req, err := 
http.NewRequestWithContext(ctx, http.MethodGet, client.apiEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a get request: %w", err) + } + resp, err := client.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to get %v: %w", client.apiEndpoint, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("expected %v response, got %v", http.StatusOK, resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read the response body: %w", err) + } + + r := bytes.NewReader(body) + stats, err := parseStubStats(r) + if err != nil { + return nil, fmt.Errorf("failed to parse response body %q: %w", string(body), err) + } + + return stats, nil +} + +-- Chunk 6 -- +// nginx.go:82-95 +func parseStubStats(r io.Reader) (*StubStats, error) { + var s StubStats + if _, err := fmt.Fscanf(r, templateMetrics, + &s.Connections.Active, + &s.Connections.Accepted, + &s.Connections.Handled, + &s.Requests, + &s.Connections.Reading, + &s.Connections.Writing, + &s.Connections.Waiting); err != nil { + return nil, fmt.Errorf("failed to scan template metrics: %w", err) + } + return &s, nil +} + +=== File: collector/nginx_plus.go === + +-- Chunk 1 -- +// nginx_plus.go:15-30 +type LabelUpdater interface { + UpdateUpstreamServerPeerLabels(upstreamServerPeerLabels map[string][]string) + DeleteUpstreamServerPeerLabels(peers []string) + UpdateUpstreamServerLabels(upstreamServerLabelValues map[string][]string) + DeleteUpstreamServerLabels(upstreamNames []string) + UpdateStreamUpstreamServerPeerLabels(streamUpstreamServerPeerLabels map[string][]string) + DeleteStreamUpstreamServerPeerLabels(peers []string) + UpdateStreamUpstreamServerLabels(streamUpstreamServerPeerLabels map[string][]string) + DeleteStreamUpstreamServerLabels(peers []string) + UpdateServerZoneLabels(serverZoneLabelValues map[string][]string) + DeleteServerZoneLabels(zoneNames []string) + 
// NginxPlusCollector holds the Prometheus metric descriptors and the
// per-entity variable label values used when scraping an NGINX Plus
// instance through its API client.
type NginxPlusCollector struct {
	// upMetric reports scrape status. NOTE(review): set/collect sites are
	// not visible in this chunk — confirm against Collect().
	upMetric prometheus.Gauge
	logger   *slog.Logger
	// Metric descriptor maps, keyed by short metric name.
	cacheZoneMetrics             map[string]*prometheus.Desc
	workerMetrics                map[string]*prometheus.Desc
	nginxClient                  *plusclient.NginxClient
	streamServerZoneMetrics      map[string]*prometheus.Desc
	streamZoneSyncMetrics        map[string]*prometheus.Desc
	streamUpstreamMetrics        map[string]*prometheus.Desc
	streamUpstreamServerMetrics  map[string]*prometheus.Desc
	locationZoneMetrics          map[string]*prometheus.Desc
	resolverMetrics              map[string]*prometheus.Desc
	limitRequestMetrics          map[string]*prometheus.Desc
	limitConnectionMetrics       map[string]*prometheus.Desc
	streamLimitConnectionMetrics map[string]*prometheus.Desc
	upstreamServerMetrics        map[string]*prometheus.Desc
	upstreamMetrics              map[string]*prometheus.Desc
	// Variable label value maps, keyed by zone/upstream/peer name and
	// mutated at runtime via the Update*/Delete* methods.
	streamUpstreamServerPeerLabels map[string][]string
	serverZoneMetrics              map[string]*prometheus.Desc
	upstreamServerLabels           map[string][]string
	streamUpstreamServerLabels     map[string][]string
	serverZoneLabels               map[string][]string
	streamServerZoneLabels         map[string][]string
	upstreamServerPeerLabels       map[string][]string
	cacheZoneLabels                map[string][]string
	totalMetrics                   map[string]*prometheus.Desc
	variableLabelNames             VariableLabelNames
	// variableLabelsMutex guards every *Labels map above; all Update*/Delete*
	// methods take the write lock and the get*LabelValues helpers take the
	// read lock.
	variableLabelsMutex sync.RWMutex
	// mutex: locking scope is not visible in this chunk — presumably
	// serializes Collect; confirm before relying on it.
	mutex sync.Mutex
}
c.variableLabelsMutex.Lock() + for _, k := range peers { + delete(c.upstreamServerPeerLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 5 -- +// nginx_plus.go:83-89 +func (c *NginxPlusCollector) UpdateStreamUpstreamServerPeerLabels(streamUpstreamServerPeerLabels map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range streamUpstreamServerPeerLabels { + c.streamUpstreamServerPeerLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 6 -- +// nginx_plus.go:92-98 +func (c *NginxPlusCollector) DeleteStreamUpstreamServerPeerLabels(peers []string) { + c.variableLabelsMutex.Lock() + for _, k := range peers { + delete(c.streamUpstreamServerPeerLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 7 -- +// nginx_plus.go:101-107 +func (c *NginxPlusCollector) UpdateUpstreamServerLabels(upstreamServerLabelValues map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range upstreamServerLabelValues { + c.upstreamServerLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 8 -- +// nginx_plus.go:110-116 +func (c *NginxPlusCollector) DeleteUpstreamServerLabels(upstreamNames []string) { + c.variableLabelsMutex.Lock() + for _, k := range upstreamNames { + delete(c.upstreamServerLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 9 -- +// nginx_plus.go:119-125 +func (c *NginxPlusCollector) UpdateStreamUpstreamServerLabels(streamUpstreamServerLabelValues map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range streamUpstreamServerLabelValues { + c.streamUpstreamServerLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 10 -- +// nginx_plus.go:128-134 +func (c *NginxPlusCollector) DeleteStreamUpstreamServerLabels(streamUpstreamNames []string) { + c.variableLabelsMutex.Lock() + for _, k := range streamUpstreamNames { + delete(c.streamUpstreamServerLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 11 -- +// nginx_plus.go:137-143 +func (c *NginxPlusCollector) 
UpdateServerZoneLabels(serverZoneLabelValues map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range serverZoneLabelValues { + c.serverZoneLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 12 -- +// nginx_plus.go:146-152 +func (c *NginxPlusCollector) DeleteServerZoneLabels(zoneNames []string) { + c.variableLabelsMutex.Lock() + for _, k := range zoneNames { + delete(c.serverZoneLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 13 -- +// nginx_plus.go:155-161 +func (c *NginxPlusCollector) UpdateStreamServerZoneLabels(streamServerZoneLabelValues map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range streamServerZoneLabelValues { + c.streamServerZoneLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 14 -- +// nginx_plus.go:164-170 +func (c *NginxPlusCollector) DeleteStreamServerZoneLabels(zoneNames []string) { + c.variableLabelsMutex.Lock() + for _, k := range zoneNames { + delete(c.streamServerZoneLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 15 -- +// nginx_plus.go:173-179 +func (c *NginxPlusCollector) UpdateCacheZoneLabels(cacheZoneLabelValues map[string][]string) { + c.variableLabelsMutex.Lock() + for k, v := range cacheZoneLabelValues { + c.cacheZoneLabels[k] = v + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 16 -- +// nginx_plus.go:182-188 +func (c *NginxPlusCollector) DeleteCacheZoneLabels(cacheZoneNames []string) { + c.variableLabelsMutex.Lock() + for _, k := range cacheZoneNames { + delete(c.cacheZoneLabels, k) + } + c.variableLabelsMutex.Unlock() +} + +-- Chunk 17 -- +// nginx_plus.go:190-194 +func (c *NginxPlusCollector) getUpstreamServerLabelValues(upstreamName string) []string { + c.variableLabelsMutex.RLock() + defer c.variableLabelsMutex.RUnlock() + return c.upstreamServerLabels[upstreamName] +} + +-- Chunk 18 -- +// nginx_plus.go:196-200 +func (c *NginxPlusCollector) getStreamUpstreamServerLabelValues(upstreamName string) []string { + 
// VariableLabelNames groups the user-configurable variable label names
// applied to the per-entity metrics (upstream servers, server zones,
// peers, and cache zones).
type VariableLabelNames struct {
	UpstreamServerVariableLabelNames           []string
	ServerZoneVariableLabelNames               []string
	UpstreamServerPeerVariableLabelNames       []string
	StreamUpstreamServerPeerVariableLabelNames []string
	StreamServerZoneVariableLabelNames         []string
	StreamUpstreamServerVariableLabelNames     []string
	CacheZoneVariableLabelNames                []string
}

// NewVariableLabelNames builds a VariableLabelNames from the individual
// label-name slices.
//
// The fifth parameter was renamed from streamServerZoneLabels to
// streamServerZoneVariableLabelNames for consistency with its siblings;
// Go arguments are positional, so callers are unaffected.
func NewVariableLabelNames(upstreamServerVariableLabelNames []string, serverZoneVariableLabelNames []string, upstreamServerPeerVariableLabelNames []string,
	streamUpstreamServerVariableLabelNames []string, streamServerZoneVariableLabelNames []string, streamUpstreamServerPeerVariableLabelNames []string, cacheZoneVariableLabelNames []string,
) VariableLabelNames {
	return VariableLabelNames{
		UpstreamServerVariableLabelNames:           upstreamServerVariableLabelNames,
		ServerZoneVariableLabelNames:               serverZoneVariableLabelNames,
		UpstreamServerPeerVariableLabelNames:       upstreamServerPeerVariableLabelNames,
		StreamUpstreamServerVariableLabelNames:     streamUpstreamServerVariableLabelNames,
		StreamServerZoneVariableLabelNames:         streamServerZoneVariableLabelNames,
		StreamUpstreamServerPeerVariableLabelNames: streamUpstreamServerPeerVariableLabelNames,
		CacheZoneVariableLabelNames:                cacheZoneVariableLabelNames,
	}
}
+ return &NginxPlusCollector{ + variableLabelNames: variableLabelNames, + upstreamServerLabels: make(map[string][]string), + serverZoneLabels: make(map[string][]string), + streamServerZoneLabels: make(map[string][]string), + upstreamServerPeerLabels: make(map[string][]string), + streamUpstreamServerPeerLabels: make(map[string][]string), + streamUpstreamServerLabels: make(map[string][]string), + cacheZoneLabels: make(map[string][]string), + nginxClient: nginxClient, + logger: logger, + totalMetrics: map[string]*prometheus.Desc{ + "connections_accepted": newGlobalMetric(namespace, "connections_accepted", "Accepted client connections", constLabels), + "connections_dropped": newGlobalMetric(namespace, "connections_dropped", "Dropped client connections", constLabels), + "connections_active": newGlobalMetric(namespace, "connections_active", "Active client connections", constLabels), + "connections_idle": newGlobalMetric(namespace, "connections_idle", "Idle client connections", constLabels), + "http_requests_total": newGlobalMetric(namespace, "http_requests_total", "Total http requests", constLabels), + "http_requests_current": newGlobalMetric(namespace, "http_requests_current", "Current http requests", constLabels), + "ssl_handshakes": newGlobalMetric(namespace, "ssl_handshakes", "Successful SSL handshakes", constLabels), + "ssl_handshakes_failed": newGlobalMetric(namespace, "ssl_handshakes_failed", "Failed SSL handshakes", constLabels), + "ssl_session_reuses": newGlobalMetric(namespace, "ssl_session_reuses", "Session reuses during SSL handshake", constLabels), + }, + serverZoneMetrics: map[string]*prometheus.Desc{ + "processing": newServerZoneMetric(namespace, "processing", "Client requests that are currently being processed", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "requests": newServerZoneMetric(namespace, "requests", "Total client requests", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "responses_1xx": 
newServerZoneMetric(namespace, "responses", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "1xx"})), + "responses_2xx": newServerZoneMetric(namespace, "responses", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "2xx"})), + "responses_3xx": newServerZoneMetric(namespace, "responses", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "3xx"})), + "responses_4xx": newServerZoneMetric(namespace, "responses", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "4xx"})), + "responses_5xx": newServerZoneMetric(namespace, "responses", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "5xx"})), + "discarded": newServerZoneMetric(namespace, "discarded", "Requests completed without sending a response", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "received": newServerZoneMetric(namespace, "received", "Bytes received from clients", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "sent": newServerZoneMetric(namespace, "sent", "Bytes sent to clients", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "codes_100": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "100"})), + "codes_101": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "101"})), + "codes_102": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", 
variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "102"})), + "codes_200": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "200"})), + "codes_201": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "201"})), + "codes_202": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "202"})), + "codes_204": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "204"})), + "codes_206": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "206"})), + "codes_300": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "300"})), + "codes_301": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "301"})), + "codes_302": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "302"})), + "codes_303": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "303"})), 
+ "codes_304": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "304"})), + "codes_307": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "307"})), + "codes_400": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "400"})), + "codes_401": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "401"})), + "codes_403": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "403"})), + "codes_404": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "404"})), + "codes_405": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "405"})), + "codes_408": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "408"})), + "codes_409": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "409"})), + "codes_411": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", 
variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "411"})), + "codes_412": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "412"})), + "codes_413": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "413"})), + "codes_414": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "414"})), + "codes_415": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "415"})), + "codes_416": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "416"})), + "codes_429": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "429"})), + "codes_444": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "444"})), + "codes_494": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "494"})), + "codes_495": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "495"})), 
+ "codes_496": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "496"})), + "codes_497": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "497"})), + "codes_499": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "499"})), + "codes_500": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "500"})), + "codes_501": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "501"})), + "codes_502": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "502"})), + "codes_503": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "503"})), + "codes_504": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "504"})), + "codes_507": newServerZoneMetric(namespace, "responses_codes", "Total responses sent to clients", variableLabelNames.ServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "507"})), + "ssl_handshakes": newServerZoneMetric(namespace, "ssl_handshakes", "Successful SSL handshakes", 
variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "ssl_handshakes_failed": newServerZoneMetric(namespace, "ssl_handshakes_failed", "Failed SSL handshakes", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + "ssl_session_reuses": newServerZoneMetric(namespace, "ssl_session_reuses", "Session reuses during SSL handshake", variableLabelNames.ServerZoneVariableLabelNames, constLabels), + }, + streamServerZoneMetrics: map[string]*prometheus.Desc{ + "processing": newStreamServerZoneMetric(namespace, "processing", "Client connections that are currently being processed", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "connections": newStreamServerZoneMetric(namespace, "connections", "Total connections", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "sessions_2xx": newStreamServerZoneMetric(namespace, "sessions", "Total sessions completed", variableLabelNames.StreamServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "2xx"})), + "sessions_4xx": newStreamServerZoneMetric(namespace, "sessions", "Total sessions completed", variableLabelNames.StreamServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "4xx"})), + "sessions_5xx": newStreamServerZoneMetric(namespace, "sessions", "Total sessions completed", variableLabelNames.StreamServerZoneVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "5xx"})), + "discarded": newStreamServerZoneMetric(namespace, "discarded", "Connections completed without creating a session", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "received": newStreamServerZoneMetric(namespace, "received", "Bytes received from clients", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "sent": newStreamServerZoneMetric(namespace, "sent", "Bytes sent to clients", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "ssl_handshakes": 
newStreamServerZoneMetric(namespace, "ssl_handshakes", "Successful SSL handshakes", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "ssl_handshakes_failed": newStreamServerZoneMetric(namespace, "ssl_handshakes_failed", "Failed SSL handshakes", variableLabelNames.StreamServerZoneVariableLabelNames, constLabels), + "ssl_session_reuses": newStreamServerZoneMetric(namespace, "ssl_session_reuses", "Session reuses during SSL handshake", va + +-- Chunk 27 -- +// nginx_plus.go:409-558 + "codes_444": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "444"})), + "codes_494": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "494"})), + "codes_495": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "495"})), + "codes_496": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "496"})), + "codes_497": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "497"})), + "codes_499": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "499"})), + "codes_500": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "500"})), + "codes_501": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to 
clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "501"})), + "codes_502": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "502"})), + "codes_503": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "503"})), + "codes_504": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "504"})), + "codes_507": newUpstreamServerMetric(namespace, "responses_codes", "Total responses sent to clients", upstreamServerVariableLabelNames, MergeLabels(constLabels, prometheus.Labels{"code": "507"})), + "ssl_handshakes": newUpstreamServerMetric(namespace, "ssl_handshakes", "Successful SSL handshakes", upstreamServerVariableLabelNames, constLabels), + "ssl_handshakes_failed": newUpstreamServerMetric(namespace, "ssl_handshakes_failed", "Failed SSL handshakes", upstreamServerVariableLabelNames, constLabels), + "ssl_session_reuses": newUpstreamServerMetric(namespace, "ssl_session_reuses", "Session reuses during SSL handshake", upstreamServerVariableLabelNames, constLabels), + }, + streamUpstreamServerMetrics: map[string]*prometheus.Desc{ + "state": newStreamUpstreamServerMetric(namespace, "state", "Current state", streamUpstreamServerVariableLabelNames, constLabels), + "active": newStreamUpstreamServerMetric(namespace, "active", "Active connections", streamUpstreamServerVariableLabelNames, constLabels), + "limit": newStreamUpstreamServerMetric(namespace, "limit", "Limit for connections which corresponds to the max_conns parameter of the upstream server. 
Zero value means there is no limit", streamUpstreamServerVariableLabelNames, constLabels), + "sent": newStreamUpstreamServerMetric(namespace, "sent", "Bytes sent to this server", streamUpstreamServerVariableLabelNames, constLabels), + "received": newStreamUpstreamServerMetric(namespace, "received", "Bytes received from this server", streamUpstreamServerVariableLabelNames, constLabels), + "fails": newStreamUpstreamServerMetric(namespace, "fails", "Number of unsuccessful attempts to communicate with the server", streamUpstreamServerVariableLabelNames, constLabels), + "unavail": newStreamUpstreamServerMetric(namespace, "unavail", "How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold", streamUpstreamServerVariableLabelNames, constLabels), + "connections": newStreamUpstreamServerMetric(namespace, "connections", "Total number of client connections forwarded to this server", streamUpstreamServerVariableLabelNames, constLabels), + "connect_time": newStreamUpstreamServerMetric(namespace, "connect_time", "Average time to connect to the upstream server", streamUpstreamServerVariableLabelNames, constLabels), + "first_byte_time": newStreamUpstreamServerMetric(namespace, "first_byte_time", "Average time to receive the first byte of data", streamUpstreamServerVariableLabelNames, constLabels), + "response_time": newStreamUpstreamServerMetric(namespace, "response_time", "Average time to receive the last byte of data", streamUpstreamServerVariableLabelNames, constLabels), + "health_checks_checks": newStreamUpstreamServerMetric(namespace, "health_checks_checks", "Total health check requests", streamUpstreamServerVariableLabelNames, constLabels), + "health_checks_fails": newStreamUpstreamServerMetric(namespace, "health_checks_fails", "Failed health checks", streamUpstreamServerVariableLabelNames, constLabels), + "health_checks_unhealthy": 
newStreamUpstreamServerMetric(namespace, "health_checks_unhealthy", "How many times the server became unhealthy (state 'unhealthy')", streamUpstreamServerVariableLabelNames, constLabels), + "ssl_handshakes": newStreamUpstreamServerMetric(namespace, "ssl_handshakes", "Successful SSL handshakes", streamUpstreamServerVariableLabelNames, constLabels), + "ssl_handshakes_failed": newStreamUpstreamServerMetric(namespace, "ssl_handshakes_failed", "Failed SSL handshakes", streamUpstreamServerVariableLabelNames, constLabels), + "ssl_session_reuses": newStreamUpstreamServerMetric(namespace, "ssl_session_reuses", "Session reuses during SSL handshake", streamUpstreamServerVariableLabelNames, constLabels), + }, + streamZoneSyncMetrics: map[string]*prometheus.Desc{ + "bytes_in": newStreamZoneSyncMetric(namespace, "bytes_in", "Bytes received by this node", constLabels), + "bytes_out": newStreamZoneSyncMetric(namespace, "bytes_out", "Bytes sent by this node", constLabels), + "msgs_in": newStreamZoneSyncMetric(namespace, "msgs_in", "Total messages received by this node", constLabels), + "msgs_out": newStreamZoneSyncMetric(namespace, "msgs_out", "Total messages sent by this node", constLabels), + "nodes_online": newStreamZoneSyncMetric(namespace, "nodes_online", "Number of peers this node is connected to", constLabels), + "records_pending": newStreamZoneSyncZoneMetric(namespace, "records_pending", "The number of records that need to be sent to the cluster", constLabels), + "records_total": newStreamZoneSyncZoneMetric(namespace, "records_total", "The total number of records stored in the shared memory zone", constLabels), + }, + locationZoneMetrics: map[string]*prometheus.Desc{ + "requests": newLocationZoneMetric(namespace, "requests", "Total client requests", constLabels), + "responses_1xx": newLocationZoneMetric(namespace, "responses", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "1xx"})), + "responses_2xx": newLocationZoneMetric(namespace, 
"responses", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "2xx"})), + "responses_3xx": newLocationZoneMetric(namespace, "responses", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "3xx"})), + "responses_4xx": newLocationZoneMetric(namespace, "responses", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "4xx"})), + "responses_5xx": newLocationZoneMetric(namespace, "responses", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "5xx"})), + "discarded": newLocationZoneMetric(namespace, "discarded", "Requests completed without sending a response", constLabels), + "received": newLocationZoneMetric(namespace, "received", "Bytes received from clients", constLabels), + "sent": newLocationZoneMetric(namespace, "sent", "Bytes sent to clients", constLabels), + "codes_100": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "100"})), + "codes_101": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "101"})), + "codes_102": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "102"})), + "codes_200": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "200"})), + "codes_201": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "201"})), + "codes_202": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "202"})), + "codes_204": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", 
MergeLabels(constLabels, prometheus.Labels{"code": "204"})), + "codes_206": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "206"})), + "codes_300": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "300"})), + "codes_301": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "301"})), + "codes_302": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "302"})), + "codes_303": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "303"})), + "codes_304": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "304"})), + "codes_307": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "307"})), + "codes_400": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "400"})), + "codes_401": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "401"})), + "codes_403": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "403"})), + "codes_404": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "404"})), + "codes_405": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, 
prometheus.Labels{"code": "405"})), + "codes_408": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "408"})), + "codes_409": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "409"})), + "codes_411": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "411"})), + "codes_412": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "412"})), + "codes_413": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "413"})), + "codes_414": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "414"})), + "codes_415": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "415"})), + "codes_416": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "416"})), + "codes_429": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "429"})), + "codes_444": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "444"})), + "codes_494": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "494"})), + "codes_495": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "495"})), + 
"codes_496": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "496"})), + "codes_497": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "497"})), + "codes_499": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "499"})), + "codes_500": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "500"})), + "codes_501": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "501"})), + "codes_502": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "502"})), + "codes_503": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "503"})), + "codes_504": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "504"})), + "codes_507": newLocationZoneMetric(namespace, "responses_codes", "Total responses sent to clients", MergeLabels(constLabels, prometheus.Labels{"code": "507"})), + }, + resolverMetrics: map[string]*prometheus.Desc{ + "name": newResolverMetric(namespace, "name", "Total requests to resolve names to addresses", constLabels), + "srv": newResolverMetric(namespace, "srv", "Total requests to resolve SRV records", constLabels), + "addr": newResolverMetric(namespace, "addr", "Total requests to resolve addresses to names", constLabels), + "noerror": newResolverMetric(namespace, "noerror", "Total number of successful responses", constLabels), + "formerr": newResolverMetric(namespace, "formerr", 
"Total n + +-- Chunk 28 -- +// nginx_plus.go:559-567 + "connection_accepted": newWorkerMetric(namespace, "connection_accepted", "The total number of accepted client connections", constLabels), + "connection_dropped": newWorkerMetric(namespace, "connection_dropped", "The total number of dropped client connections", constLabels), + "connection_active": newWorkerMetric(namespace, "connection_active", "The current number of active client connections", constLabels), + "connection_idle": newWorkerMetric(namespace, "connection_idle", "The current number of idle client connections", constLabels), + "http_requests_total": newWorkerMetric(namespace, "http_requests_total", "The total number of client requests received by the worker process", constLabels), + "http_requests_current": newWorkerMetric(namespace, "http_requests_current", "The current number of client requests that are currently being processed by the worker process", constLabels), + }, + } +} + +-- Chunk 29 -- +// nginx_plus.go:571-619 +func (c *NginxPlusCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.upMetric.Desc() + + for _, m := range c.totalMetrics { + ch <- m + } + for _, m := range c.serverZoneMetrics { + ch <- m + } + for _, m := range c.upstreamMetrics { + ch <- m + } + for _, m := range c.upstreamServerMetrics { + ch <- m + } + for _, m := range c.streamServerZoneMetrics { + ch <- m + } + for _, m := range c.streamUpstreamMetrics { + ch <- m + } + for _, m := range c.streamUpstreamServerMetrics { + ch <- m + } + for _, m := range c.streamZoneSyncMetrics { + ch <- m + } + for _, m := range c.locationZoneMetrics { + ch <- m + } + for _, m := range c.resolverMetrics { + ch <- m + } + for _, m := range c.limitRequestMetrics { + ch <- m + } + for _, m := range c.limitConnectionMetrics { + ch <- m + } + for _, m := range c.streamLimitConnectionMetrics { + ch <- m + } + for _, m := range c.cacheZoneMetrics { + ch <- m + } + for _, m := range c.workerMetrics { + ch <- m + } +} + +-- Chunk 30 -- +// 
nginx_plus.go:622-771 +func (c *NginxPlusCollector) Collect(ch chan<- prometheus.Metric) { + c.mutex.Lock() // To protect metrics from concurrent collects + defer c.mutex.Unlock() + + // FIXME: https://github.com/nginx/nginx-prometheus-exporter/issues/858 + stats, err := c.nginxClient.GetStats(context.TODO()) + if err != nil { + c.upMetric.Set(nginxDown) + ch <- c.upMetric + c.logger.Warn("error getting stats", "error", err.Error()) + return + } + + c.upMetric.Set(nginxUp) + ch <- c.upMetric + + ch <- prometheus.MustNewConstMetric(c.totalMetrics["connections_accepted"], + prometheus.CounterValue, float64(stats.Connections.Accepted)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["connections_dropped"], + prometheus.CounterValue, float64(stats.Connections.Dropped)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["connections_active"], + prometheus.GaugeValue, float64(stats.Connections.Active)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["connections_idle"], + prometheus.GaugeValue, float64(stats.Connections.Idle)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["http_requests_total"], + prometheus.CounterValue, float64(stats.HTTPRequests.Total)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["http_requests_current"], + prometheus.GaugeValue, float64(stats.HTTPRequests.Current)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["ssl_handshakes"], + prometheus.CounterValue, float64(stats.SSL.Handshakes)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["ssl_handshakes_failed"], + prometheus.CounterValue, float64(stats.SSL.HandshakesFailed)) + ch <- prometheus.MustNewConstMetric(c.totalMetrics["ssl_session_reuses"], + prometheus.CounterValue, float64(stats.SSL.SessionReuses)) + + for name, zone := range stats.ServerZones { + labelValues := []string{name} + varLabelValues := c.getServerZoneLabelValues(name) + + if c.variableLabelNames.ServerZoneVariableLabelNames != nil && len(varLabelValues) != 
len(c.variableLabelNames.ServerZoneVariableLabelNames) { + c.logger.Warn("wrong number of labels for http zone, empty labels will be used instead", "zone", name, "expected", len(c.variableLabelNames.ServerZoneVariableLabelNames), "got", len(varLabelValues)) + for range c.variableLabelNames.ServerZoneVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varLabelValues...) + } + + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["processing"], + prometheus.GaugeValue, float64(zone.Processing), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["requests"], + prometheus.CounterValue, float64(zone.Requests), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["responses_1xx"], + prometheus.CounterValue, float64(zone.Responses.Responses1xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["responses_2xx"], + prometheus.CounterValue, float64(zone.Responses.Responses2xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["responses_3xx"], + prometheus.CounterValue, float64(zone.Responses.Responses3xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["responses_4xx"], + prometheus.CounterValue, float64(zone.Responses.Responses4xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["responses_5xx"], + prometheus.CounterValue, float64(zone.Responses.Responses5xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["discarded"], + prometheus.CounterValue, float64(zone.Discarded), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["received"], + prometheus.CounterValue, float64(zone.Received), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["sent"], + prometheus.CounterValue, float64(zone.Sent), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_100"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPContinue), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_101"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSwitchingProtocols), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_102"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPProcessing), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_200"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPOk), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_201"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPCreated), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_202"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPAccepted), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_204"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNoContent), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_206"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPPartialContent), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_300"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSpecialResponse), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_301"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPMovedPermanently), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_302"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPMovedTemporarily), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_303"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSeeOther), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_304"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotModified), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_307"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPTemporaryRedirect), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_400"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPBadRequest), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_401"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPUnauthorized), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_403"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPForbidden), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_404"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotFound), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_405"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotAllowed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_408"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestTimeOut), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_409"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPConflict), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_411"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPLengthRequired), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_412"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPPreconditionFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_413"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestEntityTooLarge), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_414"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestURITooLarge), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_415"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPUnsupportedMediaType), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_416"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRangeNotSatisfiable), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_429"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPTooManyRequests), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_444"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPClose), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_494"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestHeaderTooLarge), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_495"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSCertError), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_496"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSNoCert), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_497"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPToHTTPS), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_499"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPClientClosedRequest), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_500"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPInternalServerError), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_501"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotImplemented), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_502"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPBadGateway), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_503"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPServiceUnavailable), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_504"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPGatewayTimeOut), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["codes_507"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPInsufficientStorage), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["ssl_handshakes"], + prometheus.CounterValue, float64(zone.SSL.Handshakes), labelValues...) + +-- Chunk 31 -- +// nginx_plus.go:772-921 + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["ssl_handshakes_failed"], + prometheus.CounterValue, float64(zone.SSL.HandshakesFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.serverZoneMetrics["ssl_session_reuses"], + prometheus.CounterValue, float64(zone.SSL.SessionReuses), labelValues...) + } + + for name, zone := range stats.StreamServerZones { + labelValues := []string{name} + varLabelValues := c.getStreamServerZoneLabelValues(name) + + if c.variableLabelNames.StreamServerZoneVariableLabelNames != nil && len(varLabelValues) != len(c.variableLabelNames.StreamServerZoneVariableLabelNames) { + c.logger.Warn("wrong number of labels for stream server zone, empty labels will be used instead", "zone", name, "expected", len(c.variableLabelNames.StreamServerZoneVariableLabelNames), "got", len(varLabelValues)) + for range c.variableLabelNames.StreamServerZoneVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varLabelValues...) 
+ } + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["processing"], + prometheus.GaugeValue, float64(zone.Processing), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["connections"], + prometheus.CounterValue, float64(zone.Connections), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["sessions_2xx"], + prometheus.CounterValue, float64(zone.Sessions.Sessions2xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["sessions_4xx"], + prometheus.CounterValue, float64(zone.Sessions.Sessions4xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["sessions_5xx"], + prometheus.CounterValue, float64(zone.Sessions.Sessions5xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["discarded"], + prometheus.CounterValue, float64(zone.Discarded), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["received"], + prometheus.CounterValue, float64(zone.Received), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["sent"], + prometheus.CounterValue, float64(zone.Sent), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["ssl_handshakes"], + prometheus.CounterValue, float64(zone.SSL.Handshakes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["ssl_handshakes_failed"], + prometheus.CounterValue, float64(zone.SSL.HandshakesFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamServerZoneMetrics["ssl_session_reuses"], + prometheus.CounterValue, float64(zone.SSL.SessionReuses), labelValues...) 
+ } + + for name, upstream := range stats.Upstreams { + for _, peer := range upstream.Peers { + labelValues := []string{name, peer.Server} + varLabelValues := c.getUpstreamServerLabelValues(name) + + if c.variableLabelNames.UpstreamServerVariableLabelNames != nil && len(varLabelValues) != len(c.variableLabelNames.UpstreamServerVariableLabelNames) { + c.logger.Warn("wrong number of labels for upstream, empty labels will be used instead", "upstream", name, "expected", len(c.variableLabelNames.UpstreamServerVariableLabelNames), "got", len(varLabelValues)) + for range c.variableLabelNames.UpstreamServerVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varLabelValues...) + } + + upstreamServer := fmt.Sprintf("%v/%v", name, peer.Server) + varPeerLabelValues := c.getUpstreamServerPeerLabelValues(upstreamServer) + if c.variableLabelNames.UpstreamServerPeerVariableLabelNames != nil && len(varPeerLabelValues) != len(c.variableLabelNames.UpstreamServerPeerVariableLabelNames) { + c.logger.Warn("wrong number of labels for upstream peer, empty labels will be used instead", "upstream", name, "peer", peer.Server, "expected", len(c.variableLabelNames.UpstreamServerPeerVariableLabelNames), "got", len(varPeerLabelValues)) + for range c.variableLabelNames.UpstreamServerPeerVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varPeerLabelValues...) + } + + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["state"], + prometheus.GaugeValue, upstreamServerStates[peer.State], labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["active"], + prometheus.GaugeValue, float64(peer.Active), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["limit"], + prometheus.GaugeValue, float64(peer.MaxConns), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["requests"], + prometheus.CounterValue, float64(peer.Requests), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["responses_1xx"], + prometheus.CounterValue, float64(peer.Responses.Responses1xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["responses_2xx"], + prometheus.CounterValue, float64(peer.Responses.Responses2xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["responses_3xx"], + prometheus.CounterValue, float64(peer.Responses.Responses3xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["responses_4xx"], + prometheus.CounterValue, float64(peer.Responses.Responses4xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["responses_5xx"], + prometheus.CounterValue, float64(peer.Responses.Responses5xx), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["sent"], + prometheus.CounterValue, float64(peer.Sent), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["received"], + prometheus.CounterValue, float64(peer.Received), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["fails"], + prometheus.CounterValue, float64(peer.Fails), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["unavail"], + prometheus.CounterValue, float64(peer.Unavail), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["header_time"], + prometheus.GaugeValue, float64(peer.HeaderTime), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["response_time"], + prometheus.GaugeValue, float64(peer.ResponseTime), labelValues...) 
+ + if peer.HealthChecks != (plusclient.HealthChecks{}) { + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["health_checks_checks"], + prometheus.CounterValue, float64(peer.HealthChecks.Checks), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["health_checks_fails"], + prometheus.CounterValue, float64(peer.HealthChecks.Fails), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["health_checks_unhealthy"], + prometheus.CounterValue, float64(peer.HealthChecks.Unhealthy), labelValues...) + } + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_100"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPContinue), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_101"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPSwitchingProtocols), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_102"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPProcessing), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_200"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPOk), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_201"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPCreated), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_202"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPAccepted), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_204"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPNoContent), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_206"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPPartialContent), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_300"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPSpecialResponse), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_301"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPMovedPermanently), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_302"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPMovedTemporarily), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_303"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPSeeOther), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_304"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPNotModified), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_307"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPTemporaryRedirect), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_400"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPBadRequest), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_401"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPUnauthorized), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_403"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPForbidden), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_404"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPNotFound), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_405"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPNotAllowed), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_408"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPRequestTimeOut), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_409"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPConflict), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_411"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPLengthRequired), labelValues...) + +-- Chunk 32 -- +// nginx_plus.go:922-1071 + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_412"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPPreconditionFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_413"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPRequestEntityTooLarge), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_414"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPRequestURITooLarge), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_415"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPUnsupportedMediaType), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_416"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPRangeNotSatisfiable), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_429"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPTooManyRequests), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_444"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPClose), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_494"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPRequestHeaderTooLarge), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_495"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPSCertError), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_496"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPSNoCert), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_497"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPToHTTPS), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_499"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPClientClosedRequest), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_500"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPInternalServerError), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_501"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPNotImplemented), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_502"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPBadGateway), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_503"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPServiceUnavailable), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_504"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPGatewayTimeOut), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["codes_507"], + prometheus.CounterValue, float64(peer.Responses.Codes.HTTPInsufficientStorage), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["ssl_handshakes"], + prometheus.CounterValue, float64(peer.SSL.Handshakes), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["ssl_handshakes_failed"], + prometheus.CounterValue, float64(peer.SSL.HandshakesFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.upstreamServerMetrics["ssl_session_reuses"], + prometheus.CounterValue, float64(peer.SSL.SessionReuses), labelValues...) + } + ch <- prometheus.MustNewConstMetric(c.upstreamMetrics["keepalive"], + prometheus.GaugeValue, float64(upstream.Keepalive), name) + ch <- prometheus.MustNewConstMetric(c.upstreamMetrics["zombies"], + prometheus.GaugeValue, float64(upstream.Zombies), name) + } + + for name, upstream := range stats.StreamUpstreams { + for _, peer := range upstream.Peers { + labelValues := []string{name, peer.Server} + varLabelValues := c.getStreamUpstreamServerLabelValues(name) + + if c.variableLabelNames.StreamUpstreamServerVariableLabelNames != nil && len(varLabelValues) != len(c.variableLabelNames.StreamUpstreamServerVariableLabelNames) { + c.logger.Warn("wrong number of labels for stream server, empty labels will be used instead", "server", name, "labels", c.variableLabelNames.StreamUpstreamServerVariableLabelNames, "values", varLabelValues) + for range c.variableLabelNames.StreamUpstreamServerVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varLabelValues...) 
+ } + + upstreamServer := fmt.Sprintf("%v/%v", name, peer.Server) + varPeerLabelValues := c.getStreamUpstreamServerPeerLabelValues(upstreamServer) + if c.variableLabelNames.StreamUpstreamServerPeerVariableLabelNames != nil && len(varPeerLabelValues) != len(c.variableLabelNames.StreamUpstreamServerPeerVariableLabelNames) { + c.logger.Warn("wrong number of labels for stream upstream peer, empty labels will be used instead", "server", upstreamServer, "labels", c.variableLabelNames.StreamUpstreamServerPeerVariableLabelNames, "values", varPeerLabelValues) + for range c.variableLabelNames.StreamUpstreamServerPeerVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varPeerLabelValues...) + } + + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["state"], + prometheus.GaugeValue, upstreamServerStates[peer.State], labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["active"], + prometheus.GaugeValue, float64(peer.Active), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["limit"], + prometheus.GaugeValue, float64(peer.MaxConns), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["connections"], + prometheus.CounterValue, float64(peer.Connections), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["connect_time"], + prometheus.GaugeValue, float64(peer.ConnectTime), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["first_byte_time"], + prometheus.GaugeValue, float64(peer.FirstByteTime), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["response_time"], + prometheus.GaugeValue, float64(peer.ResponseTime), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["sent"], + prometheus.CounterValue, float64(peer.Sent), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["received"], + prometheus.CounterValue, float64(peer.Received), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["fails"], + prometheus.CounterValue, float64(peer.Fails), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["unavail"], + prometheus.CounterValue, float64(peer.Unavail), labelValues...) + if peer.HealthChecks != (plusclient.HealthChecks{}) { + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["health_checks_checks"], + prometheus.CounterValue, float64(peer.HealthChecks.Checks), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["health_checks_fails"], + prometheus.CounterValue, float64(peer.HealthChecks.Fails), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["health_checks_unhealthy"], + prometheus.CounterValue, float64(peer.HealthChecks.Unhealthy), labelValues...) + } + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["ssl_handshakes"], + prometheus.CounterValue, float64(peer.SSL.Handshakes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["ssl_handshakes_failed"], + prometheus.CounterValue, float64(peer.SSL.HandshakesFailed), labelValues...) + ch <- prometheus.MustNewConstMetric(c.streamUpstreamServerMetrics["ssl_session_reuses"], + prometheus.CounterValue, float64(peer.SSL.SessionReuses), labelValues...) 
+ } + ch <- prometheus.MustNewConstMetric(c.streamUpstreamMetrics["zombies"], + prometheus.GaugeValue, float64(upstream.Zombies), name) + } + + if stats.StreamZoneSync != nil { + for name, zone := range stats.StreamZoneSync.Zones { + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["records_pending"], + prometheus.GaugeValue, float64(zone.RecordsPending), name) + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["records_total"], + prometheus.GaugeValue, float64(zone.RecordsTotal), name) + } + + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["bytes_in"], + prometheus.CounterValue, float64(stats.StreamZoneSync.Status.BytesIn)) + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["bytes_out"], + prometheus.CounterValue, float64(stats.StreamZoneSync.Status.BytesOut)) + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["msgs_in"], + prometheus.CounterValue, float64(stats.StreamZoneSync.Status.MsgsIn)) + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["msgs_out"], + prometheus.CounterValue, float64(stats.StreamZoneSync.Status.MsgsOut)) + ch <- prometheus.MustNewConstMetric(c.streamZoneSyncMetrics["nodes_online"], + prometheus.GaugeValue, float64(stats.StreamZoneSync.Status.NodesOnline)) + } + + for name, zone := range stats.LocationZones { + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["requests"], + prometheus.CounterValue, float64(zone.Requests), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["responses_1xx"], + prometheus.CounterValue, float64(zone.Responses.Responses1xx), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["responses_2xx"], + prometheus.CounterValue, float64(zone.Responses.Responses2xx), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["responses_3xx"], + prometheus.CounterValue, float64(zone.Responses.Responses3xx), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["responses_4xx"], + 
prometheus.CounterValue, float64(zone.Responses.Responses4xx), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["responses_5xx"], + prometheus.CounterValue, float64(zone.Responses.Responses5xx), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["discarded"], + prometheus.CounterValue, float64(zone.Discarded), name) + +-- Chunk 33 -- +// nginx_plus.go:1072-1221 + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["received"], + prometheus.CounterValue, float64(zone.Received), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["sent"], + prometheus.CounterValue, float64(zone.Sent), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_100"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPContinue), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_101"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSwitchingProtocols), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_102"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPProcessing), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_200"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPOk), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_201"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPCreated), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_202"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPAccepted), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_204"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNoContent), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_206"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPPartialContent), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_300"], + prometheus.CounterValue, 
float64(zone.Responses.Codes.HTTPSpecialResponse), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_301"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPMovedPermanently), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_302"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPMovedTemporarily), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_303"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSeeOther), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_304"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotModified), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_307"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPTemporaryRedirect), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_400"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPBadRequest), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_401"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPUnauthorized), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_403"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPForbidden), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_404"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotFound), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_405"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotAllowed), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_408"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestTimeOut), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_409"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPConflict), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_411"], + 
prometheus.CounterValue, float64(zone.Responses.Codes.HTTPLengthRequired), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_412"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPPreconditionFailed), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_413"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestEntityTooLarge), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_414"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestURITooLarge), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_415"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPUnsupportedMediaType), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_416"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRangeNotSatisfiable), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_429"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPTooManyRequests), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_444"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPClose), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_494"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPRequestHeaderTooLarge), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_495"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSCertError), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_496"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPSNoCert), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_497"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPToHTTPS), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_499"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPClientClosedRequest), name) + ch <- 
prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_500"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPInternalServerError), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_501"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPNotImplemented), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_502"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPBadGateway), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_503"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPServiceUnavailable), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_504"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPGatewayTimeOut), name) + ch <- prometheus.MustNewConstMetric(c.locationZoneMetrics["codes_507"], + prometheus.CounterValue, float64(zone.Responses.Codes.HTTPInsufficientStorage), name) + } + + for name, zone := range stats.Resolvers { + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["name"], + prometheus.CounterValue, float64(zone.Requests.Name), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["srv"], + prometheus.CounterValue, float64(zone.Requests.Srv), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["addr"], + prometheus.CounterValue, float64(zone.Requests.Addr), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["noerror"], + prometheus.CounterValue, float64(zone.Responses.Noerror), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["formerr"], + prometheus.CounterValue, float64(zone.Responses.Formerr), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["servfail"], + prometheus.CounterValue, float64(zone.Responses.Servfail), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["nxdomain"], + prometheus.CounterValue, float64(zone.Responses.Nxdomain), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["notimp"], + 
prometheus.CounterValue, float64(zone.Responses.Notimp), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["refused"], + prometheus.CounterValue, float64(zone.Responses.Refused), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["timedout"], + prometheus.CounterValue, float64(zone.Responses.Timedout), name) + ch <- prometheus.MustNewConstMetric(c.resolverMetrics["unknown"], + prometheus.CounterValue, float64(zone.Responses.Unknown), name) + } + + for name, zone := range stats.HTTPLimitRequests { + ch <- prometheus.MustNewConstMetric(c.limitRequestMetrics["passed"], prometheus.CounterValue, float64(zone.Passed), name) + ch <- prometheus.MustNewConstMetric(c.limitRequestMetrics["rejected"], prometheus.CounterValue, float64(zone.Rejected), name) + ch <- prometheus.MustNewConstMetric(c.limitRequestMetrics["delayed"], prometheus.CounterValue, float64(zone.Delayed), name) + ch <- prometheus.MustNewConstMetric(c.limitRequestMetrics["rejected_dry_run"], prometheus.CounterValue, float64(zone.RejectedDryRun), name) + ch <- prometheus.MustNewConstMetric(c.limitRequestMetrics["delayed_dry_run"], prometheus.CounterValue, float64(zone.DelayedDryRun), name) + } + + for name, zone := range stats.HTTPLimitConnections { + ch <- prometheus.MustNewConstMetric(c.limitConnectionMetrics["passed"], prometheus.CounterValue, float64(zone.Passed), name) + ch <- prometheus.MustNewConstMetric(c.limitConnectionMetrics["rejected"], prometheus.CounterValue, float64(zone.Rejected), name) + ch <- prometheus.MustNewConstMetric(c.limitConnectionMetrics["rejected_dry_run"], prometheus.CounterValue, float64(zone.RejectedDryRun), name) + } + + for name, zone := range stats.StreamLimitConnections { + ch <- prometheus.MustNewConstMetric(c.streamLimitConnectionMetrics["passed"], prometheus.CounterValue, float64(zone.Passed), name) + ch <- prometheus.MustNewConstMetric(c.streamLimitConnectionMetrics["rejected"], prometheus.CounterValue, float64(zone.Rejected), name) + ch <- 
prometheus.MustNewConstMetric(c.streamLimitConnectionMetrics["rejected_dry_run"], prometheus.CounterValue, float64(zone.RejectedDryRun), name) + } + + for name, zone := range stats.Caches { + labelValues := []string{name} + varLabelValues := c.getCacheZoneLabelValues(name) + + if c.variableLabelNames.CacheZoneVariableLabelNames != nil && len(varLabelValues) != len(c.variableLabelNames.CacheZoneVariableLabelNames) { + c.logger.Warn("wrong number of labels for cache zone, empty labels will be used instead", "zone", name, "labels", c.variableLabelNames.CacheZoneVariableLabelNames, "values", varLabelValues) + for range c.variableLabelNames.CacheZoneVariableLabelNames { + labelValues = append(labelValues, "") + } + } else { + labelValues = append(labelValues, varLabelValues...) + } + + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["size"], prometheus.GaugeValue, float64(zone.Size), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["max_size"], prometheus.GaugeValue, float64(zone.MaxSize), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["cold"], prometheus.GaugeValue, booleanToFloat64[zone.Cold], labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["hit_responses"], prometheus.CounterValue, float64(zone.Hit.Responses), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["hit_bytes"], prometheus.CounterValue, float64(zone.Hit.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["stale_responses"], prometheus.CounterValue, float64(zone.Stale.Responses), labelValues...) + +-- Chunk 34 -- +// nginx_plus.go:1222-1249 + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["stale_bytes"], prometheus.CounterValue, float64(zone.Stale.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["updating_responses"], prometheus.CounterValue, float64(zone.Updating.Responses), labelValues...) 
+ ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["updating_bytes"], prometheus.CounterValue, float64(zone.Updating.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["revalidated_responses"], prometheus.CounterValue, float64(zone.Revalidated.Responses), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["revalidated_bytes"], prometheus.CounterValue, float64(zone.Revalidated.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["miss_responses"], prometheus.CounterValue, float64(zone.Miss.Responses), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["miss_bytes"], prometheus.CounterValue, float64(zone.Miss.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["expired_responses"], prometheus.CounterValue, float64(zone.Expired.Responses), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["expired_bytes"], prometheus.CounterValue, float64(zone.Expired.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["expired_responses_written"], prometheus.CounterValue, float64(zone.Expired.ResponsesWritten), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["expired_bytes_written"], prometheus.CounterValue, float64(zone.Expired.BytesWritten), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["bypass_responses"], prometheus.CounterValue, float64(zone.Bypass.Responses), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["bypass_bytes"], prometheus.CounterValue, float64(zone.Bypass.Bytes), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["bypass_responses_written"], prometheus.CounterValue, float64(zone.Bypass.ResponsesWritten), labelValues...) + ch <- prometheus.MustNewConstMetric(c.cacheZoneMetrics["bypass_bytes_written"], prometheus.CounterValue, float64(zone.Bypass.BytesWritten), labelValues...) 
+ } + + for id, worker := range stats.Workers { + workerID := strconv.FormatInt(int64(id), 10) + workerPID := strconv.FormatUint(worker.ProcessID, 10) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["connection_accepted"], prometheus.CounterValue, float64(worker.Connections.Accepted), workerID, workerPID) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["connection_dropped"], prometheus.CounterValue, float64(worker.Connections.Dropped), workerID, workerPID) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["connection_active"], prometheus.GaugeValue, float64(worker.Connections.Active), workerID, workerPID) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["connection_idle"], prometheus.GaugeValue, float64(worker.Connections.Idle), workerID, workerPID) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["http_requests_total"], prometheus.CounterValue, float64(worker.HTTP.HTTPRequests.Total), workerID, workerPID) + ch <- prometheus.MustNewConstMetric(c.workerMetrics["http_requests_current"], prometheus.GaugeValue, float64(worker.HTTP.HTTPRequests.Current), workerID, workerPID) + } +} + +-- Chunk 35 -- +// nginx_plus.go:1265-1269 +func newServerZoneMetric(namespace string, metricName string, docString string, variableLabelNames []string, constLabels prometheus.Labels) *prometheus.Desc { + labels := []string{"server_zone"} + labels = append(labels, variableLabelNames...) + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "server_zone", metricName), docString, labels, constLabels) +} + +-- Chunk 36 -- +// nginx_plus.go:1271-1275 +func newStreamServerZoneMetric(namespace string, metricName string, docString string, variableLabelNames []string, constLabels prometheus.Labels) *prometheus.Desc { + labels := []string{"server_zone"} + labels = append(labels, variableLabelNames...) 
+ return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_server_zone", metricName), docString, labels, constLabels) +} + +-- Chunk 37 -- +// nginx_plus.go:1277-1279 +func newUpstreamMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "upstream", metricName), docString, []string{"upstream"}, constLabels) +} + +-- Chunk 38 -- +// nginx_plus.go:1281-1283 +func newStreamUpstreamMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_upstream", metricName), docString, []string{"upstream"}, constLabels) +} + +-- Chunk 39 -- +// nginx_plus.go:1285-1289 +func newUpstreamServerMetric(namespace string, metricName string, docString string, variableLabelNames []string, constLabels prometheus.Labels) *prometheus.Desc { + labels := []string{"upstream", "server"} + labels = append(labels, variableLabelNames...) + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "upstream_server", metricName), docString, labels, constLabels) +} + +-- Chunk 40 -- +// nginx_plus.go:1291-1295 +func newStreamUpstreamServerMetric(namespace string, metricName string, docString string, variableLabelNames []string, constLabels prometheus.Labels) *prometheus.Desc { + labels := []string{"upstream", "server"} + labels = append(labels, variableLabelNames...) 
+ return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_upstream_server", metricName), docString, labels, constLabels) +} + +-- Chunk 41 -- +// nginx_plus.go:1297-1299 +func newStreamZoneSyncMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_zone_sync_status", metricName), docString, nil, constLabels) +} + +-- Chunk 42 -- +// nginx_plus.go:1301-1303 +func newStreamZoneSyncZoneMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_zone_sync_zone", metricName), docString, []string{"zone"}, constLabels) +} + +-- Chunk 43 -- +// nginx_plus.go:1305-1307 +func newLocationZoneMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "location_zone", metricName), docString, []string{"location_zone"}, constLabels) +} + +-- Chunk 44 -- +// nginx_plus.go:1309-1311 +func newResolverMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "resolver", metricName), docString, []string{"resolver"}, constLabels) +} + +-- Chunk 45 -- +// nginx_plus.go:1313-1315 +func newLimitRequestMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "limit_request", metricName), docString, []string{"zone"}, constLabels) +} + +-- Chunk 46 -- +// nginx_plus.go:1317-1319 +func newLimitConnectionMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "limit_connection", 
metricName), docString, []string{"zone"}, constLabels) +} + +-- Chunk 47 -- +// nginx_plus.go:1321-1323 +func newStreamLimitConnectionMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "stream_limit_connection", metricName), docString, []string{"zone"}, constLabels) +} + +-- Chunk 48 -- +// nginx_plus.go:1325-1329 +func newCacheZoneMetric(namespace string, metricName string, docString string, variableLabelNames []string, constLabels prometheus.Labels) *prometheus.Desc { + labels := []string{"zone"} + labels = append(labels, variableLabelNames...) + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "cache", metricName), docString, labels, constLabels) +} + +-- Chunk 49 -- +// nginx_plus.go:1331-1333 +func newWorkerMetric(namespace string, metricName string, docString string, constLabels prometheus.Labels) *prometheus.Desc { + return prometheus.NewDesc(prometheus.BuildFQName(namespace, "worker", metricName), docString, []string{"id", "pid"}, constLabels) +} + +=== File: collector/helper_test.go === + +-- Chunk 1 -- +// helper_test.go:8-44 +func TestMergeLabels(t *testing.T) { + t.Parallel() + + tests := []struct { + mapA map[string]string + mapB map[string]string + want map[string]string + name string + }{ + { + name: "base case", + mapA: map[string]string{"a": "is here"}, + mapB: map[string]string{"b": "is here"}, + want: map[string]string{"a": "is here", "b": "is here"}, + }, + { + name: "overwrite key case", + mapA: map[string]string{"a": "is here"}, + mapB: map[string]string{"b": "is here", "a": "is now here"}, + want: map[string]string{"a": "is now here", "b": "is here"}, + }, + { + name: "empty maps case", + mapA: nil, + mapB: nil, + want: map[string]string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := MergeLabels(tt.mapA, tt.mapB); !reflect.DeepEqual(got, tt.want) { + 
t.Errorf("mergeLabels() = %v, want %v", got, tt.want) + } + }) + } +} + +=== File: collector/helper.go === + +-- Chunk 1 -- +// helper.go:12-14 +func newGlobalMetric(namespace string, metricName string, docString string, constLabels map[string]string) *prometheus.Desc { + return prometheus.NewDesc(namespace+"_"+metricName, docString, nil, constLabels) +} + +-- Chunk 2 -- +// helper.go:16-23 +func newUpMetric(namespace string, constLabels map[string]string) prometheus.Gauge { + return prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "up", + Help: "Status of the last metric scrape", + ConstLabels: constLabels, + }) +} + +-- Chunk 3 -- +// helper.go:26-37 +func MergeLabels(a map[string]string, b map[string]string) map[string]string { + c := make(map[string]string) + + for k, v := range a { + c[k] = v + } + for k, v := range b { + c[k] = v + } + + return c +} + +=== File: collector/nginx.go === + +-- Chunk 1 -- +// nginx.go:12-18 +type NginxCollector struct { + upMetric prometheus.Gauge + logger *slog.Logger + nginxClient *client.NginxClient + metrics map[string]*prometheus.Desc + mutex sync.Mutex +} + +-- Chunk 2 -- +// nginx.go:21-36 +func NewNginxCollector(nginxClient *client.NginxClient, namespace string, constLabels map[string]string, logger *slog.Logger) *NginxCollector { + return &NginxCollector{ + nginxClient: nginxClient, + logger: logger, + metrics: map[string]*prometheus.Desc{ + "connections_active": newGlobalMetric(namespace, "connections_active", "Active client connections", constLabels), + "connections_accepted": newGlobalMetric(namespace, "connections_accepted", "Accepted client connections", constLabels), + "connections_handled": newGlobalMetric(namespace, "connections_handled", "Handled client connections", constLabels), + "connections_reading": newGlobalMetric(namespace, "connections_reading", "Connections where NGINX is reading the request header", constLabels), + "connections_writing": newGlobalMetric(namespace, 
"connections_writing", "Connections where NGINX is writing the response back to the client", constLabels), + "connections_waiting": newGlobalMetric(namespace, "connections_waiting", "Idle client connections", constLabels), + "http_requests_total": newGlobalMetric(namespace, "http_requests_total", "Total http requests", constLabels), + }, + upMetric: newUpMetric(namespace, constLabels), + } +} + +-- Chunk 3 -- +// nginx.go:40-46 +func (c *NginxCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.upMetric.Desc() + + for _, m := range c.metrics { + ch <- m + } +} + +-- Chunk 4 -- +// nginx.go:49-78 +func (c *NginxCollector) Collect(ch chan<- prometheus.Metric) { + c.mutex.Lock() // To protect metrics from concurrent collects + defer c.mutex.Unlock() + + stats, err := c.nginxClient.GetStubStats() + if err != nil { + c.upMetric.Set(nginxDown) + ch <- c.upMetric + c.logger.Error("error getting stats", "error", err.Error()) + return + } + + c.upMetric.Set(nginxUp) + ch <- c.upMetric + + ch <- prometheus.MustNewConstMetric(c.metrics["connections_active"], + prometheus.GaugeValue, float64(stats.Connections.Active)) + ch <- prometheus.MustNewConstMetric(c.metrics["connections_accepted"], + prometheus.CounterValue, float64(stats.Connections.Accepted)) + ch <- prometheus.MustNewConstMetric(c.metrics["connections_handled"], + prometheus.CounterValue, float64(stats.Connections.Handled)) + ch <- prometheus.MustNewConstMetric(c.metrics["connections_reading"], + prometheus.GaugeValue, float64(stats.Connections.Reading)) + ch <- prometheus.MustNewConstMetric(c.metrics["connections_writing"], + prometheus.GaugeValue, float64(stats.Connections.Writing)) + ch <- prometheus.MustNewConstMetric(c.metrics["connections_waiting"], + prometheus.GaugeValue, float64(stats.Connections.Waiting)) + ch <- prometheus.MustNewConstMetric(c.metrics["http_requests_total"], + prometheus.CounterValue, float64(stats.Requests)) +} + +=== File: scripts/completions.sh === + +-- Chunk 1 -- +// 
/app/repos/repo_9/scripts/completions.sh:1-7 +#!/bin/sh +set -e +rm -rf completions +mkdir completions +for shell in bash zsh; do + go run . --completion-script-$shell >completions/nginx-prometheus-exporter.$shell +done + +=== File: scripts/manpages.sh === + +-- Chunk 1 -- +// /app/repos/repo_9/scripts/manpages.sh:1-5 +#!/bin/sh +set -e +rm -rf manpages +mkdir manpages +go run . --help-man | gzip -c -9 >manpages/nginx-prometheus-exporter.1.gz + +=== File: examples/tls/README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/tls/README.md:1-47 +# NGINX Prometheus Exporter with Web Configuration for TLS + +This example shows how to run NGINX Prometheus Exporter with web configuration. In this folder you will find an example +configuration `web-config.yml` that enables TLS and specifies the path to the TLS certificate and key files. +Additionally, there are two example TLS files `server.crt` and `server.key` that are used in the configuration. + +The full documentation for the web configuration can be found +[here](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). + + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Running NGINX Prometheus Exporter with Web Configuration in TLS mode](#running-nginx-prometheus-exporter-with-web-configuration-in-tls-mode) +- [Verification](#verification) + + + +## Prerequisites + +- NGINX Prometheus Exporter binary. See the [main README](../../README.md) for installation instructions. +- NGINX or NGINX Plus running on the same machine. + +## Running NGINX Prometheus Exporter with Web Configuration in TLS mode + +You can run NGINX Prometheus Exporter with web configuration in TLS mode using the following command: + +```console +nginx-prometheus-exporter --web.config.file=web-config.yml --nginx.scrape-uri="http://127.0.0.1:8080/stub_status" +``` + +you should see an output similar to this: + +```console +... 
+ts=2023-07-20T02:00:26.932Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9113 +ts=2023-07-20T02:00:26.936Z caller=tls_config.go:310 level=info msg="TLS is enabled." http2=true address=[::]:9113 +``` + +Depending on your environment, you may need to specify the full path to the binary or change the path to the web +configuration file. + +## Verification + +Run `curl -k https://localhost:9113/metrics` to see the metrics exposed by the exporter. The `-k` flag is needed because +the certificate is self-signed. + +=== File: examples/tls/web-config.yml === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/tls/web-config.yml:1-3 +tls_server_config: + cert_file: server.crt + key_file: server.key + +=== File: examples/systemd/README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/systemd/README.md:1-47 +# NGINX Prometheus Exporter with systemd-activated socket + +This example shows how to run NGINX Prometheus Exporter with systemd-activated socket. + + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Customization](#customization) +- [Installation](#installation) +- [Verification](#verification) + + + +## Prerequisites + +- Linux machine with [systemd](https://www.freedesktop.org/wiki/Software/systemd/). +- NGINX Prometheus Exporter binary in `/usr/local/bin/nginx-prometheus-exporter` or a location of your choice. See the + [main README](../../README.md) for installation instructions. +- NGINX or NGINX Plus running on the same machine. + +## Customization + +Modify `nginx_exporter.service` and `nginx_exporter.socket` to match your environment. + +The default configuration assumes that NGINX Prometheus Exporter binary is located in +`/usr/local/bin/nginx-prometheus-exporter`. + +The `ExecStart` directive has the flag `--web.systemd-socket` which tells the exporter to listen on the socket specified +in the `nginx_exporter.socket` file. + +The `ListenStream` directive in `nginx_exporter.socket` specifies the socket to listen on. 
The default configuration +uses `9113` port, but the address can be written in various formats, for example `/run/nginx_exporter.sock`. To see the +full list of supported formats, run `man systemd.socket`. + +## Installation + +1. Copy `nginx_exporter.service` and `nginx_exporter.socket` to `/etc/systemd/system/` +2. Add a user named `nginx_exporter` +3. Run `systemctl daemon-reload` +4. Run `systemctl start nginx_exporter` +5. Run `systemctl status nginx_exporter` to check the status of the service + +## Verification + +1. Run `curl http://localhost:9113/metrics` to see the metrics exposed by the exporter. + +=== File: examples/systemd/nginx_exporter.socket === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/systemd/nginx_exporter.socket:1-8 +[Unit] +Description=NGINX Prometheus Exporter + +[Socket] +ListenStream=9113 + +[Install] +WantedBy=sockets.target + +=== File: examples/systemd/nginx_exporter.service === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/systemd/nginx_exporter.service:1-10 +[Unit] +Description=NGINX Prometheus Exporter +Requires=nginx_exporter.socket + +[Service] +User=nginx_exporter +ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://127.0.0.1:8080/stub_status --web.systemd-socket + +[Install] +WantedBy=multi-user.target + +=== File: examples/basic_auth/README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/basic_auth/README.md:1-39 +# NGINX Prometheus Exporter with Web Configuration for Basic Authentication + +This example shows how to run NGINX Prometheus Exporter with web configuration. In this folder you will find an example +configuration `web-config.yml` that enables basic authentication. It is configured to have a single user `alice` with +password `password`. + +The full documentation for the web configuration can be found +[here](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). 
+ + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Running NGINX Prometheus Exporter with Web Configuration in Basic Authentication mode](#running-nginx-prometheus-exporter-with-web-configuration-in-basic-authentication-mode) +- [Verification](#verification) + + + +## Prerequisites + +- NGINX Prometheus Exporter binary. See the [main README](../../README.md) for installation instructions. +- NGINX or NGINX Plus running on the same machine. + +## Running NGINX Prometheus Exporter with Web Configuration in Basic Authentication mode + +You can run NGINX Prometheus Exporter with web configuration in Basic Authentication mode using the following command: + +```console +nginx-prometheus-exporter --web.config.file=web-config.yml --nginx.scrape-uri="http://127.0.0.1:8080/stub_status" +``` + +Depending on your environment, you may need to specify the full path to the binary or change the path to the web +configuration file. + +## Verification + +Run `curl -u alice:password http://localhost:9113/metrics` to see the metrics exposed by the exporter. Without the `-u` +flag, the request will fail with `401 Unauthorized`. 
+ +=== File: examples/basic_auth/web-config.yml === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/basic_auth/web-config.yml:1-2 +basic_auth_users: + alice: $2y$10$6xfhlaIhUDCUl60zPxkqLudN3QjL3Lfjg5gPAWiqElTLErpxAxJbC + +=== File: examples/kubernetes/nginx-hello.yaml === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/kubernetes/nginx-hello.yaml:1-72 +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: nginx-demo + name: nginx-demo +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: nginx-demo + template: + metadata: + labels: + app.kubernetes.io/name: nginx-demo + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9113" + spec: + containers: + - image: nginxdemos/hello:latest + name: nginx-demo + ports: + - name: http + containerPort: 80 + volumeMounts: + - name: config-volume + mountPath: /etc/nginx/conf.d/status.conf + subPath: status.conf + - image: nginx/nginx-prometheus-exporter:latest + name: nginx-prometheus-exporter + args: + - "--nginx.scrape-uri=http://localhost:8080/stub_status" + ports: + - name: metrics + containerPort: 9113 + volumes: + - name: config-volume + configMap: + name: status-config +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-demo +spec: + type: NodePort + selector: + app.kubernetes.io/name: nginx-demo + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 9113 + targetPort: 9113 + name: metrics +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: status-config +data: + status.conf: |- + server { + listen 8080; + + location /stub_status { + stub_status; + } + + } + +=== File: examples/kubernetes/README.md === + +-- Chunk 1 -- +// /app/repos/repo_9/examples/kubernetes/README.md:1-53 +# NGINX Prometheus Exporter in Kubernetes + +This example shows how to run NGINX Prometheus Exporter in a Kubernetes cluster. 
+ + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Create a kind cluster](#create-a-kind-cluster) +- [Deploy the NGINX Hello application and NGINX Prometheus Exporter](#deploy-the-nginx-hello-application-and-nginx-prometheus-exporter) +- [Configure port forwarding](#configure-port-forwarding) +- [Verification](#verification) + + + +## Prerequisites + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) installed. +- [kind](https://kind.sigs.k8s.io/) installed. + +## Create a kind cluster + +You can create a kind cluster with the following command: + +```console +kind create cluster +``` + +For details, see the [kind documentation](https://kind.sigs.k8s.io/docs/user/quick-start/#creating-a-cluster). + +## Deploy the NGINX Hello application and NGINX Prometheus Exporter + +You can deploy the NGINX Hello application and NGINX Prometheus Exporter with the following command: + +```console +kubectl apply -f nginx-hello.yaml +``` + +## Configure port forwarding + +Port forwarding is used to access the NGINX Hello application and NGINX Prometheus Exporter from your local machine. + +You can configure port forwarding with the following command: + +```console +kubectl port-forward service/nginx-demo 8080:80 9113:9113 +``` + +## Verification + +You can access the NGINX Hello application at [http://localhost:8080](http://localhost:8080) and the +NGINX Prometheus Exporter at [http://localhost:9113](http://localhost:9113). 
diff --git a/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/chroma.sqlite3 b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/chroma.sqlite3 new file mode 100644 index 000000000..326eb4b22 Binary files /dev/null and b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/chroma.sqlite3 differ diff --git a/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/data_level0.bin b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/data_level0.bin new file mode 100644 index 000000000..330b85705 Binary files /dev/null and b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/data_level0.bin differ diff --git a/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/header.bin b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/header.bin new file mode 100644 index 000000000..074f5b8bb Binary files /dev/null and b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/header.bin differ diff --git a/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/length.bin b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/length.bin new file mode 100644 index 000000000..8cf22f5e9 Binary files /dev/null and b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/length.bin differ diff --git a/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/link_lists.bin b/.kno/embedding_SBERTEmbedding_1746888631506_ced6fda/fec288b8-349c-4718-be14-a8cf39b8ac71/link_lists.bin new file mode 100644 index 000000000..e69de29bb diff --git a/SECURITY_AUDIT_Prometheus-beta.md b/SECURITY_AUDIT_Prometheus-beta.md new file mode 100644 index 000000000..3cd5f54c2 --- /dev/null +++ b/SECURITY_AUDIT_Prometheus-beta.md @@ -0,0 +1,132 @@ +# Security 
and Performance Analysis: NGINX Prometheus Exporter Comprehensive Audit Report + +# NGINX Prometheus Exporter Security Audit Report + +## Overview + +This security audit report provides a comprehensive analysis of the NGINX Prometheus Exporter repository, identifying potential vulnerabilities, code quality issues, and recommendations for improvement. + +## Table of Contents + +- [Security Vulnerabilities](#security-vulnerabilities) +- [Performance Considerations](#performance-considerations) +- [Code Quality](#code-quality) +- [Observability](#observability) +- [Recommendations](#recommendations) + +## Security Vulnerabilities + +### 1. TLS Configuration Risk + +_File: examples/tls/web-config.yml_ + +```yaml +tls_server_config: + cert_file: server.crt + key_file: server.key +``` + +**Issue**: Potential insecure TLS configuration with minimal validation + +**Risks**: + +- Lack of explicit cipher suite configuration +- No clear TLS version constraints +- Potential use of weak certificates + +**Suggested Fix**: + +- Implement strict TLS configuration +- Enforce TLS 1.2 or 1.3 +- Use strong cipher suites +- Implement certificate rotation mechanisms + +### 2. HTTP Client Security + +_File: collector/nginx.go_ + +**Issue**: Potential HTTP client configuration vulnerabilities + +**Risks**: + +- No explicit timeout configurations +- Potential for connection leaks +- Lack of connection pooling strategies + +**Suggested Fix**: + +- Implement default and configurable timeouts +- Use context-based request cancellation +- Configure connection pooling +- Add transport-level security checks + +## Performance Considerations + +### 1. Metric Collection Efficiency + +**Observation**: Potential performance bottlenecks in concurrent metric gathering + +**Recommendations**: + +- Implement robust goroutine management +- Use context-based cancellation +- Add request-level timeouts +- Optimize metric collection algorithms + +## Code Quality + +### 1. 
Modular Design + +**Strengths**: + +- Clear separation of concerns +- Distinct structs for NGINX clients and metric collectors +- Flexible configuration options + +**Recommendations**: + +- Continue maintaining architectural boundaries +- Add comprehensive interface documentation +- Implement more extensive unit testing + +## Observability + +### 1. Logging and Tracing + +**Current State**: + +- Basic logging mechanisms +- Limited internal health metrics + +**Recommendations**: + +- Enhance error logging +- Implement detailed tracing for metric collection +- Add comprehensive health check endpoints +- Create structured logging with severity levels + +## Recommendations + +1. Security Enhancements + - Implement strict TLS configurations + - Add explicit HTTP client timeout mechanisms + - Enhance error handling and logging + +2. Performance Optimization + - Optimize goroutine and connection management + - Implement efficient metric collection strategies + +3. Code Quality Improvements + - Expand test coverage + - Document configuration best practices + - Create deployment guidelines + +## Conclusion + +The NGINX Prometheus Exporter demonstrates a security-conscious design with clear opportunities for incremental improvements. By addressing the identified vulnerabilities and implementing the recommended enhancements, the project can significantly improve its security posture and performance. + +--- + +**Audit Completed**: 2025-05-10 +**Auditor**: Security Engineering Team +**Risk Level**: Low to Moderate